Preview rows from a Python source-file dataset with per-file repository metadata and code-quality signals. The flattened viewer output paired each field name with the previous field's dtype; the schema below realigns them.

Schema (field: dtype, in column order):

- hexsha: string
- size: int64
- ext: string
- lang: string

Repository aggregates, one block each for the max_stars, max_issues, and max_forks variant of the file (substitute the prefix, and stars/issues/forks in the event fields):

- max_*_repo_path: string
- max_*_repo_name: string
- max_*_repo_head_hexsha: string
- max_*_repo_licenses: list
- max_*_count: int64
- max_*_repo_*_event_min_datetime: string
- max_*_repo_*_event_max_datetime: string

File body and basic stats:

- content: string
- avg_line_length: float64
- max_line_length: int64
- alphanum_fraction: float64

Quality signals, float64 unless noted, in this order (this numbering also keys the per-row value lists below):

1. qsc_code_num_words_quality_signal: int64
2. qsc_code_num_chars_quality_signal
3. qsc_code_mean_word_length_quality_signal
4. qsc_code_frac_words_unique_quality_signal
5. qsc_code_frac_chars_top_2grams_quality_signal
6. qsc_code_frac_chars_top_3grams_quality_signal
7. qsc_code_frac_chars_top_4grams_quality_signal
8. qsc_code_frac_chars_dupe_5grams_quality_signal
9. qsc_code_frac_chars_dupe_6grams_quality_signal
10. qsc_code_frac_chars_dupe_7grams_quality_signal
11. qsc_code_frac_chars_dupe_8grams_quality_signal
12. qsc_code_frac_chars_dupe_9grams_quality_signal
13. qsc_code_frac_chars_dupe_10grams_quality_signal
14. qsc_code_frac_chars_replacement_symbols_quality_signal
15. qsc_code_frac_chars_digital_quality_signal
16. qsc_code_frac_chars_whitespace_quality_signal
17. qsc_code_size_file_byte_quality_signal
18. qsc_code_num_lines_quality_signal
19. qsc_code_num_chars_line_max_quality_signal
20. qsc_code_num_chars_line_mean_quality_signal
21. qsc_code_frac_chars_alphabet_quality_signal
22. qsc_code_frac_chars_comments_quality_signal
23. qsc_code_cate_xml_start_quality_signal
24. qsc_code_frac_lines_dupe_lines_quality_signal
25. qsc_code_cate_autogen_quality_signal
26. qsc_code_frac_lines_long_string_quality_signal
27. qsc_code_frac_chars_string_length_quality_signal
28. qsc_code_frac_chars_long_word_length_quality_signal
29. qsc_code_frac_lines_string_concat_quality_signal
30. qsc_code_cate_encoded_data_quality_signal
31. qsc_code_frac_chars_hex_words_quality_signal
32. qsc_code_frac_lines_prompt_comments_quality_signal
33. qsc_code_frac_lines_assert_quality_signal
34. qsc_codepython_cate_ast_quality_signal
35. qsc_codepython_frac_lines_func_ratio_quality_signal
36. qsc_codepython_cate_var_zero_quality_signal: bool
37. qsc_codepython_frac_lines_pass_quality_signal
38. qsc_codepython_frac_lines_import_quality_signal
39. qsc_codepython_frac_lines_simplefunc_quality_signal
40. qsc_codepython_score_lines_no_logic_quality_signal
41. qsc_codepython_frac_lines_print_quality_signal

Per-signal filter flags: the same 41 names without the `_quality_signal` suffix (qsc_code_num_words through qsc_codepython_frac_lines_print), dtype int64; qsc_code_frac_words_unique (#4) and qsc_code_frac_lines_string_concat (#29) are null in every row shown.

Trailing fields: effective: string, hits: int64. In the rows shown, hits appears to track the number of filter flags set to 1.
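As orientation, here is a minimal sketch of how rows with this schema could be loaded and screened with pandas. The parquet filename and the numeric threshold are hypothetical illustrations, not part of this dataset; only the field names come from the schema above.

```python
import pandas as pd

# Hypothetical filename: the preview does not say how the rows are stored.
df = pd.read_parquet("python_files_with_quality_signals.parquet")

# "hits" tracks how many per-signal filter flags fired in the rows above,
# so it can serve as a coarse quality screen on its own.
clean = df[
    (df["hits"] <= 7)
    # Arbitrary illustrative threshold on one duplication signal:
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
]
print(f"kept {len(clean)} of {len(df)} rows")
```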
---

Row 1: pants-plugins/experimental/pyoxidizer/register.py

- hexsha: 002a292129033a7331480d5093ed79609e07583b, size: 300, ext: py, lang: Python
- max_stars: repo sureshjoshi/pants-pyoxidizer-plugin, head 1e7c61d8e61de4f84c33dce2ef7349f448997b6b, licenses ["Apache-2.0"], count 3, events 2022-01-12T15:41:12.000Z to 2022-03-21T08:46:00.000Z
- max_issues: same repo, path, and head; licenses ["Apache-2.0"], count 19, events 2022-01-13T02:55:55.000Z to 2022-02-03T15:23:48.000Z
- max_forks: same repo, path, and head; licenses ["Apache-2.0"], count null, events null
- content:

```python
from experimental.pyoxidizer import subsystem
from experimental.pyoxidizer.rules import rules as pyoxidizer_rules
from experimental.pyoxidizer.target_types import PyOxidizerTarget
def rules():
return [*pyoxidizer_rules(), *subsystem.rules()]
def target_types():
return [PyOxidizerTarget]
```

- avg_line_length: 25, max_line_length: 67, alphanum_fraction: 0.806667
- quality signals 1-41 (order as in the schema list): 33, 300, 7.212121, 0.363636, 0.201681, 0.327731, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.116667, 300, 11, 68, 27.272727, 0.898113, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.285714, true, 0, 0.428571, 0.285714, 1, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0
- effective: 0, hits: 7
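The three leading stats are directly recomputable from `content`. A sketch with the obvious definitions follows; note that this row reports avg_line_length 25 but num_chars_line_mean 27.272727, so the two fields evidently divide by different line counts, and the exact convention is not documented here.

```python
def basic_stats(source: str) -> dict:
    """Recompute the leading per-file stats with the obvious definitions."""
    lines = source.splitlines()
    return {
        "size": len(source),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": (
            sum(ch.isalnum() for ch in source) / len(source) if source else 0.0
        ),
        "num_lines": len(lines),
    }
```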
---

Row 2: tests/parser/statements/test_VariableStatement.py

- hexsha: 0039e05a2e00541b246e1298da4882a416655fe2, size: 2967, ext: py, lang: Python
- max_stars: repo nyrLang/nyrLang, head 49c427fdd4aca4349c629fe220f3a235f305f50f, licenses ["MIT"], count 3, events 2021-09-30T18:52:29.000Z to 2021-11-09T03:41:38.000Z
- max_issues: repo niyrme/NyrLang (same path and head), licenses ["MIT"], count 14, events 2021-06-19T15:31:51.000Z to 2021-08-25T14:11:24.000Z
- max_forks: repo niyrme/NyrLang (same path and head), licenses ["MIT"], count 1, events 2021-09-06T16:23:07.000Z to 2021-09-06T16:23:07.000Z
- content:

```python
import json
from nyr.parser import node
from nyr.parser.parser import Parser
def testDeclarationWithAssign():
ast = json.loads(
json.dumps(
Parser().parse("let x = 42;"),
cls=node.ComplexEncoder,
),
)
expected = {
"type": "Program",
"body": [
{
"type": "VariableStatement",
"declarations": [
{
"type": "VariableDeclaration",
"id": {
"type": "Identifier",
"name": "x",
},
"init": {
"type": "IntegerLiteral",
"value": 42,
},
},
],
},
],
}
assert ast == expected
def testDeclarationWithoutAssign():
ast = json.loads(
json.dumps(
Parser().parse("let x;"),
cls=node.ComplexEncoder,
),
)
expected = {
"type": "Program",
"body": [
{
"type": "VariableStatement",
"declarations": [
{
"type": "VariableDeclaration",
"id": {
"type": "Identifier",
"name": "x",
},
"init": None,
},
],
},
],
}
assert ast == expected
def testMultipleDeclarationsWithoutAssign():
ast = json.loads(
json.dumps(
Parser().parse("let x, y;"),
cls=node.ComplexEncoder,
),
)
expected = {
"type": "Program",
"body": [
{
"type": "VariableStatement",
"declarations": [
{
"type": "VariableDeclaration",
"id": {
"type": "Identifier",
"name": "x",
},
"init": None,
},
{
"type": "VariableDeclaration",
"id": {
"type": "Identifier",
"name": "y",
},
"init": None,
},
],
},
],
}
assert ast == expected
def testMultipleDeclarationsWithPartialAssign():
ast = json.loads(
json.dumps(
Parser().parse("let x, y = 42;"),
cls=node.ComplexEncoder,
),
)
expected = {
"type": "Program",
"body": [
{
"type": "VariableStatement",
"declarations": [
{
"type": "VariableDeclaration",
"id": {
"type": "Identifier",
"name": "x",
},
"init": None,
},
{
"type": "VariableDeclaration",
"id": {
"type": "Identifier",
"name": "y",
},
"init": {
"type": "IntegerLiteral",
"value": 42,
},
},
],
},
],
}
assert ast == expected
def testMultipleDeclarationsWithAllAssign():
ast = json.loads(
json.dumps(
Parser().parse("let x = 7, y = 42;"),
cls=node.ComplexEncoder,
),
)
expected = {
"type": "Program",
"body": [
{
"type": "VariableStatement",
"declarations": [
{
"type": "VariableDeclaration",
"id": {
"type": "Identifier",
"name": "x",
},
"init": {
"type": "IntegerLiteral",
"value": 7,
},
},
{
"type": "VariableDeclaration",
"id": {
"type": "Identifier",
"name": "y",
},
"init": {
"type": "IntegerLiteral",
"value": 42,
},
},
],
},
],
}
assert ast == expected
```

- avg_line_length: 15.534031, max_line_length: 48, alphanum_fraction: 0.473879
- quality signals 1-41 (order as in the schema list): 224, 2967, 6.276786, 0.169643, 0.130868, 0.142248, 0.165007, 0.831437, 0.831437, 0.800142, 0.800142, 0.800142, 0.705548, 0, 0.007046, 0.3303, 2967, 190, 49, 15.615789, 0.700554, 0, 0, 0.633136, 0, 0, 0.26087, 0, 0, 0, 0, 0, 0.029586, 1, 0.029586, false, 0, 0.017751, 0, 0.047337, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective: 0, hits: 7
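This row is a good illustration of the duplication signals: five near-identical test functions push qsc_code_frac_chars_dupe_5grams through dupe_10grams above 0.7. One plausible reading of such a signal, the fraction of n-gram character mass covered by word n-grams that occur more than once, can be sketched as below; the dataset's exact tokenization and counting rules are assumptions here.

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    """Fraction of n-gram character mass that belongs to repeated n-grams."""
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)

    def mass(gram):
        return sum(len(word) for word in gram)

    total = sum(mass(g) for g in ngrams)
    dupe = sum(mass(g) for g in ngrams if counts[g] > 1)
    return dupe / total if total else 0.0
```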
---

Row 3: models/VGG16.py

- hexsha: 0050fb1112915c1ef3f3878b0a2aa8a3295e6a5e, size: 7254, ext: py, lang: Python
- max_stars: repo Apiquet/Tracking_SSD_ReID, head 93e0f88879b9354921569ad34139a0b3354d4ee4, licenses ["MIT"], count 3, events 2021-02-28T06:05:23.000Z to 2022-03-07T14:21:36.000Z
- max_issues: same repo, path, and head; licenses ["MIT"], count null, events null
- max_forks: same repo, path, and head; licenses ["MIT"], count 2, events 2021-12-25T17:17:31.000Z to 2022-02-25T15:05:25.000Z
- content:

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
VGG16 implementation: https://arxiv.org/abs/1409.1556
"""
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten
class VGG16(keras.Model):
def __init__(self, input_shape=(224, 224, 3)):
super(VGG16, self).__init__()
'''
Available layers
Typo: layerType_Stage_NumberInStage_Info
'''
self.conv_1_1_64 = Conv2D(input_shape=input_shape,
filters=64,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv1_1")
self.conv_1_2_64 = Conv2D(filters=64,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv1_2")
self.maxpool_1_3_2x2 = MaxPool2D(pool_size=(2, 2),
strides=(2, 2),
padding='same')
self.conv_2_1_128 = Conv2D(filters=128,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv2_1")
self.conv_2_2_128 = Conv2D(filters=128,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv2_2")
self.maxpool_2_3_2x2 = MaxPool2D(pool_size=(2, 2),
strides=(2, 2),
padding='same')
self.conv_3_1_256 = Conv2D(filters=256,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv3_1")
self.conv_3_2_256 = Conv2D(filters=256,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv3_2")
self.conv_3_3_256 = Conv2D(filters=256,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv3_3")
self.maxpool_3_4_2x2 = MaxPool2D(pool_size=(2, 2),
strides=(2, 2),
padding='same')
self.conv_4_1_512 = Conv2D(filters=512,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv4_1")
self.conv_4_2_512 = Conv2D(filters=512,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv4_2")
self.conv_4_3_512 = Conv2D(filters=512,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv4_3")
self.maxpool_4_4_2x2 = MaxPool2D(pool_size=(2, 2),
strides=(2, 2),
padding='same')
self.conv_5_1_512 = Conv2D(filters=512,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv5_1")
self.conv_5_2_512 = Conv2D(filters=512,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv5_2")
self.conv_5_3_512 = Conv2D(filters=512,
kernel_size=(3, 3),
padding="same",
activation="relu",
name="Conv5_3")
self.maxpool_5_4_2x2 = MaxPool2D(pool_size=(2, 2),
strides=(2, 2),
padding='same')
self.flatten_6_1 = Flatten()
self.dense_6_2_4096 = Dense(4096, activation='relu')
self.dense_6_3_4096 = Dense(4096, activation='relu')
self.dense_6_4_10 = Dense(2, activation='softmax')
def getModel(self):
return keras.models.Sequential([
self.conv_1_1_64,
self.conv_1_2_64,
self.maxpool_1_3_2x2,
# Stage 2
self.conv_2_1_128,
self.conv_2_2_128,
self.maxpool_2_3_2x2,
# Stage 3
self.conv_3_1_256,
self.conv_3_2_256,
self.conv_3_3_256,
self.maxpool_3_4_2x2,
# Stage 4
self.conv_4_1_512,
self.conv_4_2_512,
self.conv_4_3_512,
self.maxpool_4_4_2x2,
# Stage 5
self.conv_5_1_512,
self.conv_5_2_512,
self.conv_5_3_512,
self.maxpool_5_4_2x2,
# Stage 6
self.flatten_6_1,
self.dense_6_2_4096,
self.dense_6_3_4096,
self.dense_6_4_10])
def getUntilStage4(self):
return keras.models.Sequential([
self.conv_1_1_64,
self.conv_1_2_64,
self.maxpool_1_3_2x2,
# Stage 2
self.conv_2_1_128,
self.conv_2_2_128,
self.maxpool_2_3_2x2,
# Stage 3
self.conv_3_1_256,
self.conv_3_2_256,
self.conv_3_3_256,
self.maxpool_3_4_2x2,
# Stage 4
self.conv_4_1_512,
self.conv_4_2_512,
self.conv_4_3_512])
def getUntilStage5(self):
return keras.models.Sequential([
self.conv_1_1_64,
self.conv_1_2_64,
self.maxpool_1_3_2x2,
# Stage 2
self.conv_2_1_128,
self.conv_2_2_128,
self.maxpool_2_3_2x2,
# Stage 3
self.conv_3_1_256,
self.conv_3_2_256,
self.conv_3_3_256,
self.maxpool_3_4_2x2,
# Stage 4
self.conv_4_1_512,
self.conv_4_2_512,
self.conv_4_3_512,
# Stage 5
self.maxpool_4_4_2x2,
self.conv_5_1_512,
self.conv_5_2_512,
self.conv_5_3_512])
def getStage5(self):
return keras.models.Sequential([
self.maxpool_4_4_2x2,
self.conv_5_1_512,
self.conv_5_2_512,
self.conv_5_3_512])
```

- avg_line_length: 37.979058, max_line_length: 69, alphanum_fraction: 0.409981
- quality signals 1-41 (order as in the schema list): 740, 7254, 3.644595, 0.104054, 0.154245, 0.053022, 0.057842, 0.836856, 0.745643, 0.734149, 0.734149, 0.706711, 0.706711, 0, 0.149304, 0.505101, 7254, 190, 70, 38.178947, 0.60195, 0.026606, 0, 0.662338, 0, 0, 0.033127, 0, 0, 0, 0, 0, 0, 1, 0.032468, false, 0, 0.012987, 0.025974, 0.077922, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective: 0, hits: 7
---

Row 4: ctypes_generation/extended_structs/_RPC_IF_ID.py

- hexsha: cc3e34eec5af16ed50cd7ad22777241731e1ce22, size: 199, ext: py, lang: Python
- max_stars: repo IMULMUL/PythonForWindows, head 61e027a678d5b87aa64fcf8a37a6661a86236589, licenses ["BSD-3-Clause"], count 479, events 2016-01-08T00:53:34.000Z to 2022-03-22T10:28:19.000Z
- max_issues: same repo, path, and head; licenses ["BSD-3-Clause"], count 38, events 2017-12-29T17:09:04.000Z to 2022-01-31T08:27:47.000Z
- max_forks: same repo, path, and head; licenses ["BSD-3-Clause"], count 103, events 2016-01-10T01:32:17.000Z to 2021-12-24T17:21:06.000Z
- content:

```python
INITIAL_RPC_IF_ID = RPC_IF_ID
class _RPC_IF_ID(INITIAL_RPC_IF_ID):
def __repr__(self):
return '<RPC_IF_ID "{0}" ({1}, {2})>'.format(self.Uuid.to_string(), self.VersMajor, self.VersMinor)
```

- avg_line_length: 39.8, max_line_length: 107, alphanum_fraction: 0.703518
- quality signals 1-41 (order as in the schema list): 34, 199, 3.588235, 0.529412, 0.204918, 0.286885, 0.229508, 0, 0, 0, 0, 0, 0, 0, 0.017341, 0.130653, 199, 5, 107, 39.8, 0.687861, 0, 0, 0, 0, 0, 0.14, 0, 0, 0, 0, 0, 0, 1, 0.25, false, 0, 0, 0.25, 0.75, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0
- effective: 0, hits: 7
---

Row 5: application/modules/post/windows-priv-check/wpc/users.py

- hexsha: cca3db4418550a32bb65a4a56eda22b9d89dc12e, size: 1938, ext: py, lang: Python
- max_stars: repo cys3c/viper-shell, head e05a07362b7d1e6d73c302a24d2506846e43502c, licenses ["PSF-2.0", "BSD-2-Clause"], count 2, events 2018-06-30T03:21:30.000Z to 2020-03-22T02:31:02.000Z
- max_issues: same repo, path, and head; licenses ["PSF-2.0", "BSD-2-Clause"], count null, events null
- max_forks: same repo, path, and head; licenses ["PSF-2.0", "BSD-2-Clause"], count 3, events 2017-11-15T11:08:20.000Z to 2020-03-22T02:31:03.000Z
- content:

```python
from wpc.user import user
import win32net
import wpc.conf
import pywintypes
class users():
def __init__(self):
self.users = []
def get_filtered(self, ):
if self.users == []:
try:
level = 1
resume = 0
while True:
userlist, total, resume = win32net.NetUserEnum(wpc.conf.remote_server, level, 0, resume, 999999)
#print u
for u in userlist:
# self.users.append(user['name'])
#try:
sid, name, type = wpc.conf.cache.LookupAccountName(wpc.conf.remote_server, u['name'])
self.users.append(user(sid))
#except:
# print "[E] failed to lookup sid of %s" % user['name']
if resume == 0:
break
except pywintypes.error as e:
print "[E] %s: %s" % (e[1], e[2])
return self.users
def get_all(self):
if self.users == []:
try:
level = 0
resume = 0
while True:
userlist, total, resume = win32net.NetUserEnum(wpc.conf.remote_server, level, 0, resume, 999999)
#print u
for u in userlist:
# self.users.append(user['name'])
#try:
sid, name, type = wpc.conf.cache.LookupAccountName(wpc.conf.remote_server, u['name'])
self.users.append(user(sid))
#except:
# print "[E] failed to lookup sid of %s" % user['name']
if resume == 0:
break
except pywintypes.error as e:
print "[E] %s: %s" % (e[1], e[2])
return self.users
```

- avg_line_length: 37.269231, max_line_length: 116, alphanum_fraction: 0.428793
- quality signals 1-41 (order as in the schema list): 195, 1938, 4.210256, 0.261538, 0.09866, 0.063337, 0.09257, 0.855055, 0.855055, 0.799026, 0.799026, 0.799026, 0.799026, 0, 0.029126, 0.468524, 1938, 51, 117, 38, 0.767961, 0.110423, 0, 0.702703, 0, 0, 0.016336, 0, 0, 0, 0, 0, 0, 0, null, null, 0, 0.108108, null, null, 0.054054
- filter flags 1-41 (same order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, null, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- effective: 0, hits: 8
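This row shows what happens to the Python-specific signals when a file is Python 2: the bare print statements make `ast.parse` fail under Python 3, the AST-derived signals (cate_ast, frac_lines_func_ratio, cate_var_zero, frac_lines_simplefunc, score_lines_no_logic) come back 0 or null, and the cate_ast filter flag trips. A sketch of such a gate, with the interpretation inferred from the rows rather than documented:

```python
import ast

def parses_as_python3(source: str) -> bool:
    """Gate of the kind qsc_codepython_cate_ast appears to encode:
    True when the file parses as Python 3 source."""
    try:
        ast.parse(source)
        return True
    except SyntaxError:
        return False

# Python 2 print statement vs. Python 3 print call:
assert not parses_as_python3('print "[E] %s" % err')
assert parses_as_python3('print("[E] %s" % err)')
```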
---

Row 6: tests/cmhc/test_data_parser.py

- hexsha: aeb418645c3c70c9c562fe5973ee0700ab60b6dd, size: 17228, ext: py, lang: Python
- max_stars: repo JasonMWhite/cmhc-scraper, head 1c62c9b26fb83814af415b7e9e017cc8d062347b, licenses ["MIT"], count 2, events 2018-10-17T07:48:35.000Z to 2019-02-11T18:56:26.000Z
- max_issues: same repo, path, and head; licenses ["MIT"], count null, events null
- max_forks: same repo, path, and head; licenses ["MIT"], count 1, events 2019-03-14T23:41:29.000Z to 2019-03-14T23:41:29.000Z
- content:

```python
import pytest
from cmhc.cmhc.spiders.stats import StatsSpider
@pytest.fixture()
def data():
return """
<div class="data-widget">
<form action="/hmip-pimh/en/TableMapChart/RenderTable" data-ajax="true" data-ajax-mode="replace" data-ajax-update="#predefinedTable-data-area" id="PredefinedTableForm" method="post"></form>
<div class="table-section">
<form action="/hmip-pimh/en/CustomTable/Export" method="post">
<table class="CawdDataTable">
<thead>
<tr> <td data-sort-key="sgc_census_tract_cde" rowspan="1"> </td>
<th colspan="2" data-sort-key="cell-1" scope="col"><span>Bachelor</span></th>
<th colspan="2" data-sort-key="cell-3" scope="col"><span>1 Bedroom</span></th>
<th colspan="2" data-sort-key="cell-5" scope="col"><span>2 Bedroom</span></th>
<th colspan="2" data-sort-key="cell-7" scope="col"><span>3 Bedroom +</span></th>
<th colspan="2" data-sort-key="cell-9" scope="col"><span>Total</span></th>
</tr>
</thead>
<tfoot>
<tr> <th scope="row">Guelph</th>
<td class="">**</td>
<td class=""></td>
<td class="numericalData">0.7</td>
<td class="">a </td>
<td class="numericalData">0.9</td>
<td class="">a </td>
<td class="numericalData">1.2</td>
<td class="">a </td>
<td class="numericalData">0.9</td>
<td class="">a </td>
</tr>
</tfoot>
<tbody>
<tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="0" scope="row">0001.02</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value="**"></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="1" scope="row">0001.03</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.7">0.7</td>
<td class="" data-field="cell-4" data-value="a ">a </td>
<td class="numericalData" data-field="cell-5" data-value="0.7">0.7</td>
<td class="" data-field="cell-6" data-value="a ">a </td>
<td class="numericalData" data-field="cell-7" data-value="0.3">0.3</td>
<td class="" data-field="cell-8" data-value="a ">a </td>
<td class="numericalData" data-field="cell-9" data-value="0.5">0.5</td>
<td class="" data-field="cell-10" data-value="a ">a </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="2" scope="row">0001.06</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value=""></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value=""></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="3" scope="row">0002.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value="**"></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="4" scope="row">0003.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.0">0.0</td>
<td class="" data-field="cell-4" data-value="d ">d </td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="5" scope="row">0004.01</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value=""></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value=""></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="6" scope="row">0004.03</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value=""></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value="**"></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="7" scope="row">0005.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.0">0.0</td>
<td class="" data-field="cell-4" data-value="d ">d </td>
<td class="numericalData" data-field="cell-5" data-value="0.0">0.0</td>
<td class="" data-field="cell-6" data-value="d ">d </td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="numericalData" data-field="cell-9" data-value="0.0">0.0</td>
<td class="" data-field="cell-10" data-value="d ">d </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="8" scope="row">0006.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value="**"></td>
<td class="numericalData" data-field="cell-5" data-value="1.1">1.1</td>
<td class="" data-field="cell-6" data-value="a ">a </td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="numericalData" data-field="cell-9" data-value="2.9">2.9</td>
<td class="" data-field="cell-10" data-value="c ">c </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="9" scope="row">0007.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.0">0.0</td>
<td class="" data-field="cell-4" data-value="d ">d </td>
<td class="numericalData" data-field="cell-5" data-value="0.0">0.0</td>
<td class="" data-field="cell-6" data-value="c ">c </td>
<td class="numericalData" data-field="cell-7" data-value="0.0">0.0</td>
<td class="" data-field="cell-8" data-value="d ">d </td>
<td class="numericalData" data-field="cell-9" data-value="0.0">0.0</td>
<td class="" data-field="cell-10" data-value="d ">d </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="10" scope="row">0008.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.6">0.6</td>
<td class="" data-field="cell-4" data-value="a ">a </td>
<td class="numericalData" data-field="cell-5" data-value="1.0">1.0</td>
<td class="" data-field="cell-6" data-value="a ">a </td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="numericalData" data-field="cell-9" data-value="0.8">0.8</td>
<td class="" data-field="cell-10" data-value="a ">a </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="11" scope="row">0009.03</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value=""></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value=""></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="12" scope="row">0009.04</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.7">0.7</td>
<td class="" data-field="cell-4" data-value="a ">a </td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="numericalData" data-field="cell-9" data-value="0.2">0.2</td>
<td class="" data-field="cell-10" data-value="a ">a </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="13" scope="row">0010.01</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.5">0.5</td>
<td class="" data-field="cell-4" data-value="a ">a </td>
<td class="numericalData" data-field="cell-5" data-value="0.5">0.5</td>
<td class="" data-field="cell-6" data-value="a ">a </td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="numericalData" data-field="cell-9" data-value="0.5">0.5</td>
<td class="" data-field="cell-10" data-value="a ">a </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="14" scope="row">0010.02</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.0">0.0</td>
<td class="" data-field="cell-4" data-value="d ">d </td>
<td class="numericalData" data-field="cell-5" data-value="0.0">0.0</td>
<td class="" data-field="cell-6" data-value="d ">d </td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="numericalData" data-field="cell-9" data-value="0.0">0.0</td>
<td class="" data-field="cell-10" data-value="d ">d </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="15" scope="row">0011.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.0">0.0</td>
<td class="" data-field="cell-4" data-value="d ">d </td>
<td class="numericalData" data-field="cell-5" data-value="0.0">0.0</td>
<td class="" data-field="cell-6" data-value="d ">d </td>
<td class="numericalData" data-field="cell-7" data-value="0.0">0.0</td>
<td class="" data-field="cell-8" data-value="a ">a </td>
<td class="numericalData" data-field="cell-9" data-value="0.0">0.0</td>
<td class="" data-field="cell-10" data-value="d ">d </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="16" scope="row">0012.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.0">0.0</td>
<td class="" data-field="cell-4" data-value="d ">d </td>
<td class="numericalData" data-field="cell-5" data-value="1.1">1.1</td>
<td class="" data-field="cell-6" data-value="d ">d </td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="numericalData" data-field="cell-9" data-value="0.7">0.7</td>
<td class="" data-field="cell-10" data-value="b ">b </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="17" scope="row">0013.01</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value="**"></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="18" scope="row">0013.02</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.0">0.0</td>
<td class="" data-field="cell-4" data-value="d ">d </td>
<td class="numericalData" data-field="cell-5" data-value="1.1">1.1</td>
<td class="" data-field="cell-6" data-value="d ">d </td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="numericalData" data-field="cell-9" data-value="1.5">1.5</td>
<td class="" data-field="cell-10" data-value="c ">c </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="19" scope="row">0014.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value="**"></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="20" scope="row">0015.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value="**"></td>
<td class="numericalData" data-field="cell-3" data-value="0.0">0.0</td>
<td class="" data-field="cell-4" data-value="d ">d </td>
<td class="numericalData" data-field="cell-5" data-value="0.0">0.0</td>
<td class="" data-field="cell-6" data-value="c ">c </td>
<td class="numericalData" data-field="cell-7" data-value="0.9">0.9</td>
<td class="" data-field="cell-8" data-value="a ">a </td>
<td class="numericalData" data-field="cell-9" data-value="0.2">0.2</td>
<td class="" data-field="cell-10" data-value="b ">b </td>
</tr><tr> <th class="first-cell" data-field="sgc_census_tract_cde" data-value="21" scope="row">0100.00</th>
<td class="" data-field="cell-1" data-value="">**</td>
<td class="" data-field="cell-2" data-value=""></td>
<td class="" data-field="cell-3" data-value="">**</td>
<td class="" data-field="cell-4" data-value="**"></td>
<td class="" data-field="cell-5" data-value="">**</td>
<td class="" data-field="cell-6" data-value="**"></td>
<td class="" data-field="cell-7" data-value="">**</td>
<td class="" data-field="cell-8" data-value="**"></td>
<td class="" data-field="cell-9" data-value="">**</td>
<td class="" data-field="cell-10" data-value="**"></td>
</tr>
</tbody>
</table>
<input type="hidden" name="exportType" id="exportType" value="pdf" />
<input type="hidden" name="title" value="Vacancy Rates by Bedroom Type by Zone "/>
</form></div>
"""
def test_parse_vacancy_data(data):
result = [x for x in StatsSpider.extract_data(data)]
result = sorted(result, key=lambda d: d['name'])
assert len(result) == 22
assert result[0] == {
'name': '0001.02',
'1 Bedroom': None,
'2 Bedroom': None,
'3 Bedroom +': None,
'Bachelor': None,
'Total': None,
}
assert result[1] == {
'name': '0001.03',
'1 Bedroom': '0.7',
'2 Bedroom': '0.7',
'3 Bedroom +': '0.3',
'Bachelor': None,
'Total': '0.5',
}
```

- avg_line_length: 55.935065, max_line_length: 189, alphanum_fraction: 0.621198
- quality signals 1-41 (order as in the schema list): 2991, 17228, 3.553661, 0.04547, 0.204911, 0.269075, 0.272462, 0.88823, 0.88061, 0.88061, 0.878069, 0.876188, 0.873271, 0, 0.038928, 0.081495, 17228, 307, 190, 56.117264, 0.632773, 0, 0, 0.722772, 0, 0.349835, 0.967959, 0.375609, 0, 0, 0, 0, 0.009901, 1, 0.006601, false, 0, 0.006601, 0.0033, 0.016502, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective: 0, hits: 12
---

Row 7: faeAuditor/auditResults/migrations/0005_auto_20181025_1515.py

- hexsha: aed21ded31537db99f2aa60f1c3851fe730a3aa8, size: 2341, ext: py, lang: Python
- max_stars: repo opena11y/fae-auditor, head ea9099b37b77ddc30092b0cdd962647c92b143a7, licenses ["Apache-2.0"], count 2, events 2018-02-28T19:03:28.000Z to 2021-09-30T13:40:23.000Z
- max_issues: same repo, path, and head; licenses ["Apache-2.0"], count 6, events 2020-02-11T21:53:58.000Z to 2022-02-10T07:57:58.000Z
- max_forks: same repo, path, and head; licenses ["Apache-2.0"], count 1, events 2019-12-05T06:05:20.000Z to 2019-12-05T06:05:20.000Z
- content:

```python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-10-25 20:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auditResults', '0004_auto_20181024_1326'),
]
operations = [
migrations.AlterField(
model_name='auditguidelineresult',
name='implementation_pass_fail_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditguidelineresult',
name='implementation_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditresult',
name='implementation_pass_fail_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditresult',
name='implementation_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditrulecategoryresult',
name='implementation_pass_fail_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditrulecategoryresult',
name='implementation_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditruleresult',
name='implementation_pass_fail_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditruleresult',
name='implementation_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditrulescoperesult',
name='implementation_pass_fail_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
migrations.AlterField(
model_name='auditrulescoperesult',
name='implementation_score',
field=models.DecimalField(decimal_places=1, max_digits=4),
),
]
```

- avg_line_length: 35.469697, max_line_length: 70, alphanum_fraction: 0.624947
- quality signals 1-41 (order as in the schema list): 217, 2341, 6.474654, 0.230415, 0.142349, 0.177936, 0.206406, 0.859786, 0.859786, 0.859786, 0.810676, 0.810676, 0.810676, 0, 0.031213, 0.274669, 2341, 65, 71, 36.015385, 0.796231, 0.029047, 0, 0.862069, 1, 0, 0.203965, 0.096476, 0, 0, 0, 0, 0, 1, 0, false, 0.086207, 0.034483, 0, 0.086207, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0
- effective: 0, hits: 10
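Here qsc_code_cate_autogen is 1: the file announces itself as generated by Django, and the autogen filter flag trips alongside the duplicate-n-gram flags. A guess at such a detector, scanning the first few lines for marker comments; the marker list is an assumption, not the pipeline's actual vocabulary:

```python
import re

# Assumed markers; the real detector's marker list is not shown in this dump.
AUTOGEN_MARKERS = re.compile(r"generated by|auto-?generated|do not edit", re.I)

def looks_autogenerated(source: str, head_lines: int = 5) -> bool:
    """Check the top of a file for an auto-generation marker comment."""
    head = "\n".join(source.splitlines()[:head_lines])
    return AUTOGEN_MARKERS.search(head) is not None
```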
---

Row 8: sqlact.py

- hexsha: aee96e2e09fd4e8469fa3a4e0e6520918ea5f689, size: 3637, ext: py, lang: Python
- max_stars: repo sherlockxiao-git/Helmet-face-Detection, head 3c8c3edaf435fbd011b41f74a3417c44f66f52b6, licenses ["MIT"], count 2, events 2020-06-22T03:32:57.000Z to 2020-09-22T01:24:55.000Z
- max_issues: same repo, path, and head; licenses ["MIT"], count null, events null
- max_forks: same repo, path, and head; licenses ["MIT"], count null, events null
- content:

```python
import pymysql
######################################
def add_to_sql(tablename, path, name):
aiuse = pymysql.connect(host="localhost",
port=3306,
user="root",
password="wang518518",
db="db_helmet")
cs3 = aiuse.cursor()
sql3 = "insert into " + tablename + " values(%s, %s ,0 )"
cs3.execute(sql3, (path, name))
aiuse.commit() # 提交数据
cs3.close() # 关闭游标cs3
aiuse.close() # 关闭数据库
print("well done")
#################################
def delete_one_sql(tablename, image_id):
aiuse = pymysql.connect(host="localhost",
port=3306,
user="root",
password="wang518518",
db="db_aiuse")
cs3 = aiuse.cursor()
sql = "delete from" + tablename + "where id=%s"
cs3.execute(sql, (image_id))
aiuse.commit() ## 提交数据
cs3.close() ## 关闭游标cs3
aiuse.close() ## 关闭数据库
print("well done")
######################################
def update_one_sql(tablename, name):
aiuse = pymysql.connect(host="localhost",
port=3306,
user="root",
password="wang518518",
db="db_helmet")
cs3 = aiuse.cursor()
sql3 = "update " + tablename + " set illegal=illegal+1 where path = %s"
cs3.execute(sql3, (name))
aiuse.commit() ## 提交数据
cs3.close() ## 关闭游标cs3
aiuse.close() ## 关闭数据库
print("well done")
###########################
def search_sql(tablename, image_id):
aiuse = pymysql.connect(host="localhost",
port=3306,
user="root",
password="wang518518",
db="db_helmet")
cs3 = aiuse.cursor()
sql3 = "select * from " + tablename + " where image_id = %s "
cs3.execute(sql3, (image_id))
data3 = cs3.fetchall() ## 指针移动到最后一行了
# print(data3)
cs3.close() ## 关闭游标cs3
aiuse.close() ## 关闭数据库
return data3
#############################
def search_by_path(tablename, path):
aiuse = pymysql.connect(host="localhost",
port=3306,
user="root",
password="wang518518",
db="db_helmet")
cs3 = aiuse.cursor()
sql3 = "select * from " + tablename + " where path = %s "
cs3.execute(sql3, (path))
data3 = cs3.fetchall() ## 指针移动到最后一行了
# print(data3)
cs3.close() ## 关闭游标cs3
aiuse.close() ## 关闭数据库
return data3
################################
def search_all_sql():
aiuse = pymysql.connect(host="localhost",
port=3306,
user="root",
password="wang518518",
db="db_helmet")
cs3 = aiuse.cursor()
sql3 = "select name,illegal from face"
cs3.execute(sql3)
data3 = cs3.fetchall() ## 指针移动到最后一行了
# print(data3)
cs3.close() ## 关闭游标cs3
aiuse.close() ## 关闭数据库
return data3
def delete_illegal():
aiuse = pymysql.connect(host="localhost",
port=3306,
user="root",
password="wang518518",
db="db_aiuse")
cs3 = aiuse.cursor()
sql = "delete from" + "where id=%s"
cs3.execute(sql, ())
aiuse.commit() ## 提交数据
cs3.close() ## 关闭游标cs3
aiuse.close() ## 关闭数据库
print("well done")
```

- avg_line_length: 28.637795, max_line_length: 76, alphanum_fraction: 0.45532
- quality signals 1-41 (order as in the schema list): 331, 3637, 4.930514, 0.172205, 0.051471, 0.081495, 0.098652, 0.833333, 0.833333, 0.78125, 0.78125, 0.78125, 0.78125, 0, 0.053275, 0.37036, 3637, 127, 77, 28.637795, 0.659389, 0.051966, 0, 0.76087, 0, 0, 0.14771, 0, 0.01087, 0, 0, 0, 0, 1, 0.076087, false, 0.076087, 0.01087, 0, 0.119565, 0.043478
- filter flags 1-41 (same order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0
- effective: 0, hits: 8
---

Row 9: tests/cases/run/comparison6.py

- hexsha: 4e0ca8b263333f35f38ddaaf63e2272b3480fb8a, size: 449, ext: py, lang: Python
- max_stars: repo 3e45/minpiler, head 993bdb38d1e4709a412bb551f7eb213376bfe7d2, licenses ["MIT"], count null, events null
- max_issues: same repo, path, and head; licenses ["MIT"], count 5, events 2022-02-12T19:53:08.000Z to 2022-03-02T04:30:32.000Z
- max_forks: same repo, path, and head; licenses ["MIT"], count null, events null
- content:

```python
from minpiler.std import L, M, emulator
M.print(L.message1.type == M.at.Block.message, ";", L.message1.type != M.at.Block.message, ";")
M.print(L.message1.type is M.at.Block.message, ";", L.message1.type is not M.at.Block.message, ";")
M.print(L.message1.type == M.at.Block.duo, ";", L.message1.type != M.at.Block.duo, ";")
M.print(L.message1.type is M.at.Block.duo, ";", L.message1.type is not M.at.Block.duo)
emulator.kill()
# > 1;0;1;0;0;1;0;1
```

- avg_line_length: 44.9, max_line_length: 99, alphanum_fraction: 0.66147
- quality signals 1-41 (order as in the schema list): 87, 449, 3.413793, 0.218391, 0.242424, 0.350168, 0.20202, 0.818182, 0.818182, 0.818182, 0.626263, 0.360269, 0, 0, 0.039604, 0.100223, 449, 9, 100, 49.888889, 0.695545, 0.037862, 0, 0, 0, 0, 0.016279, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 0.166667, 0, 0.166667, 0.666667
- filter flags 1-41 (same order): 0, 0, 0, null, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1
- effective: 0, hits: 9
---

Row 10: tests/common/graph/test_graph_matching.py

- hexsha: 9dd58129b873648c45821ea45f346f39f53b0c1d, size: 6165, ext: py, lang: Python
- max_stars: repo MaximProshin/nncf, head 2290d2f4cebcf6749e419dc76850e7bd8b7d8da1, licenses ["Apache-2.0"], count 136, events 2020-06-01T14:03:31.000Z to 2020-10-28T06:10:50.000Z
- max_issues: same repo, path, and head; licenses ["Apache-2.0"], count 133, events 2020-05-26T13:48:04.000Z to 2020-10-28T05:25:55.000Z
- max_forks: same repo, path, and head; licenses ["Apache-2.0"], count 36, events 2020-05-28T08:18:39.000Z to 2020-10-27T14:46:58.000Z
- content:

```python
from tests.common.graph.test_graph_pattern import TestPattern
from nncf.common.graph.graph_matching import find_subgraphs_matching_pattern
import networkx as nx
import itertools
def test_ops_combination_patterns():
pattern = TestPattern.first_pattern + TestPattern.second_pattern
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='c')
ref_graph.add_edge('1', '2')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert matches == [['1', '2']]
pattern = TestPattern.first_pattern + TestPattern.second_pattern | TestPattern.third_pattern
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='c')
ref_graph.add_edge('1', '2')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert matches == [['1', '2']]
pattern = (TestPattern.first_pattern + TestPattern.second_pattern)
pattern_nodes = list(pattern.graph.nodes)
third_nodes = list(TestPattern.third_pattern.graph.nodes)
edges = list(itertools.product(pattern_nodes, third_nodes))
pattern.join_patterns(TestPattern.third_pattern, edges)
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='c')
ref_graph.add_node('3', type='e')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('1', '3')
ref_graph.add_edge('2', '3')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert matches == [['1', '2', '3']]
def test_no_matches():
pattern = (TestPattern.first_pattern + TestPattern.second_pattern + TestPattern.third_pattern)
pattern_nodes = list(pattern.graph.nodes)
third_nodes = list(TestPattern.third_pattern.graph.nodes)
edges = list(itertools.product(pattern_nodes, third_nodes))
pattern.join_patterns(TestPattern.third_pattern, edges)
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='c')
ref_graph.add_node('3', type='e')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('2', '3')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert not matches
def test_two_matches():
pattern = TestPattern.first_pattern + TestPattern.second_pattern
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='c')
ref_graph.add_node('3', type='e')
ref_graph.add_node('4', type='c')
ref_graph.add_node('5', type='a')
ref_graph.add_node('6', type='d')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('2', '3')
ref_graph.add_edge('5', '6')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
matches.sort()
assert matches == [['1', '2'], ['5', '6']]
def create_graph_with_many_nodes():
# ref_graph
# a
# |
# a
# |
# b b
# / \ /
# a c
# | /
# | /
# |/
# e
# |
# a---c
ref_graph = nx.DiGraph()
nodes = {
'1': {'type': 'a'}, '2': {'type': 'b'}, '3': {'type': 'c'}, '4': {'type': 'a'}, '5': {'type': 'e'},
'6': {'type': 'a'}, '7': {'type': 'a'}, '8': {'type': 'b'}, '9': {'type': 'c'}
}
for k, attrs in nodes.items():
ref_graph.add_node(k, **attrs)
ref_graph.add_edges_from([('1', '2'), ('2', '3'), ('2', '4'), ('4', '5'), ('5', '6'),
('3', '5'), ('7', '1'), ('8', '3'), ('9', '6')])
return ref_graph
def test_matches_with_non_pattern_node_type():
pattern = TestPattern.forth_pattern + TestPattern.first_pattern + TestPattern.second_pattern
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='a')
ref_graph.add_node('3', type='c')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('2', '3')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert matches == [['2', '3']]
pattern = TestPattern.forth_pattern + TestPattern.first_pattern + \
TestPattern.second_pattern + TestPattern.forth_pattern
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='a')
ref_graph.add_node('3', type='c')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('2', '3')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert matches == [['2', '3']]
pattern = TestPattern.pattern_with_non_pattern_nodes
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='b')
ref_graph.add_node('3', type='c')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('2', '3')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert not matches
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='b')
ref_graph.add_node('4', type='a')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('2', '4')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert not matches
ref_graph = create_graph_with_many_nodes()
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert matches == [['1', '2', '4', '3', '5', '6']]
def test_matches_with_any_pattern_node_type():
pattern = TestPattern.pattern_with_any_pattern_nodes
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='b')
ref_graph.add_node('3', type='c')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('2', '3')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert not matches
ref_graph = nx.DiGraph()
ref_graph.add_node('1', type='a')
ref_graph.add_node('2', type='b')
ref_graph.add_node('4', type='a')
ref_graph.add_edge('1', '2')
ref_graph.add_edge('2', '4')
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert not matches
ref_graph = create_graph_with_many_nodes()
matches = find_subgraphs_matching_pattern(ref_graph, pattern)
assert matches == [['7', '1', '2', '4', '8', '3', '5', '9', '6']]
```

- avg_line_length: 33.688525, max_line_length: 107, alphanum_fraction: 0.633577
- quality signals 1-41 (order as in the schema list): 873, 6165, 4.172967, 0.075601, 0.191051, 0.17513, 0.144112, 0.858359, 0.833928, 0.821576, 0.821576, 0.800988, 0.800988, 0, 0.027201, 0.194972, 6165, 182, 108, 33.873626, 0.706831, 0.023682, 0, 0.723881, 0, 0, 0.035667, 0, 0, 0, 0, 0, 0.097015, 1, 0.044776, false, 0, 0.029851, 0, 0.08209, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- effective: 0, hits: 7
---

Row 11: openstack-cyborg-2.0.0/cyborg/objects/driver_objects/__init__.py

- hexsha: 9de197108ede3a20397277f3c2d47d91b7b13e99, size: 312, ext: py, lang: Python
- max_stars: repo scottwedge/OpenStack-Stein, head 7077d1f602031dace92916f14e36b124f474de15, licenses ["Apache-2.0"], count 37, events 2017-03-23T02:10:35.000Z to 2021-11-25T07:57:36.000Z
- max_issues: same repo, path, and head; licenses ["Apache-2.0"], count 5, events 2019-08-14T06:46:03.000Z to 2021-12-13T20:01:25.000Z
- max_forks: same repo, path, and head; licenses ["Apache-2.0"], count 27, events 2017-07-14T02:26:24.000Z to 2022-01-19T07:55:10.000Z
- content:

```python
__import__('cyborg.objects.driver_objects.driver_device')
__import__('cyborg.objects.driver_objects.driver_attribute')
__import__('cyborg.objects.driver_objects.driver_attach_handle')
__import__('cyborg.objects.driver_objects.driver_deployable')
__import__('cyborg.objects.driver_objects.driver_controlpath_id')
```

- avg_line_length: 52, max_line_length: 65, alphanum_fraction: 0.871795
- quality signals 1-41 (order as in the schema list): 37, 312, 6.486486, 0.297297, 0.541667, 0.395833, 0.520833, 0.791667, 0.791667, 0, 0, 0, 0, 0, 0, 0.016026, 312, 5, 66, 62.4, 0.781759, 0, 0, 0, 0, 0, 0.759615, 0.759615, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective: 0, hits: 10
---

Row 12: modules/__init__.py

- hexsha: d186226bac7d7f145d1384b333a9a1b58e78b0fa, size: 102, ext: py, lang: Python
- max_stars: repo Julian-Hochhaus/py_modules, head cf3fa9b1193a0022ffcd7d3b15979ac19936da61, licenses ["MIT"], count 2, events 2018-04-27T12:00:37.000Z to 2018-11-12T14:08:43.000Z
- max_issues: same repo, path, and head; licenses ["MIT"], count 5, events 2018-11-13T19:02:38.000Z to 2019-07-29T10:57:11.000Z
- max_forks: same repo, path, and head; licenses ["MIT"], count null, events null
- content:

```python
from .tab2latex.tab2latex.textable import latex_tab
from .tab2latex.tab2latex.textable import long_tab
```

- avg_line_length: 51, max_line_length: 51, alphanum_fraction: 0.872549
- quality signals 1-41 (order as in the schema list): 14, 102, 6.214286, 0.5, 0.298851, 0.505747, 0.689655, 0.827586, 0, 0, 0, 0, 0, 0, 0.042105, 0.068627, 102, 2, 52, 51, 0.873684, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, true, 0, 1, 0, 1, 0
- filter flags 1-41 (same order): 0, 0, 0, null, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0
- effective: 0, hits: 9
---

Row 13: krake/tests/controller/kubernetes/test_observer.py

- hexsha: d19946326ec731917265910d08c9c1d3b37873db, size: 62159, ext: py, lang: Python
- max_stars: repo rak-n-rok/Krake, head 2f0d4a382b99639e2c1149ee8593a9bb589d2d3f, licenses ["Apache-2.0"], count 1, events 2020-05-29T08:43:32.000Z to 2020-05-29T08:43:32.000Z
- max_issues: same repo, path, and head; licenses ["Apache-2.0"], count null, events null
- max_forks: same repo, path, and head; licenses ["Apache-2.0"], count 1, events 2019-11-19T13:39:02.000Z to 2019-11-19T13:39:02.000Z
- content:

```python
import asyncio
import json
from contextlib import suppress
import pytest
from aiohttp import web
from copy import deepcopy
from krake.api.app import create_app
from krake.controller.kubernetes.client import KubernetesClient
from krake.controller.kubernetes.hooks import (
register_observer,
update_last_applied_manifest_from_spec,
update_last_applied_manifest_from_resp,
update_last_observed_manifest_from_resp,
generate_default_observer_schema,
)
from krake.data.core import resource_ref
from krake.data.kubernetes import Application, ApplicationState
from krake.controller.kubernetes import KubernetesController, KubernetesObserver
from krake.client import Client
from krake.test_utils import server_endpoint, get_first_container, serialize_k8s_object
from tests.factories.fake import fake
from tests.factories.kubernetes import (
ApplicationFactory,
ClusterFactory,
make_kubeconfig,
)
from tests.controller.kubernetes import (
deployment_manifest,
service_manifest,
nginx_manifest,
custom_deployment_observer_schema,
custom_service_observer_schema,
custom_observer_schema,
mangled_observer_schema,
deployment_response,
service_response,
configmap_response,
initial_last_observed_manifest_deployment,
initial_last_observed_manifest_service,
initial_last_observed_manifest,
)
async def test_reception_for_observer(aiohttp_server, config, db, loop):
"""Test the condition to start an Observer
When an received application is in PENDING state, no Observer should be started.
When an application is RUNNING, an Observer should be started.
"""
cluster = ClusterFactory()
pending = ApplicationFactory(status__state=ApplicationState.PENDING)
running = ApplicationFactory(
status__running_on=resource_ref(cluster), status__state=ApplicationState.RUNNING
)
server = await aiohttp_server(create_app(config))
await db.put(cluster)
await db.put(pending)
await db.put(running)
async with Client(url=server_endpoint(server), loop=loop) as client:
controller = KubernetesController(server_endpoint(server), worker_count=0)
# Update the client, to be used by the background tasks
await controller.prepare(client) # need to be called explicitly
await controller.reflector.list_resource()
# Each running Application has a corresponding observer
assert len(controller.observers) == 1
assert running.metadata.uid in controller.observers
async def test_observer_on_poll_update(aiohttp_server, db, config, loop):
"""Test the Observer's behavior on update of a resource on the k8s cluster directly
This test goes through the following scenario:
State (0):
A Deployment, a Service and a ConfigMap are present, the Deployment has an
nginx image with version "1.7.9". The service defines 1 port using the "TCP"
protocol. A custom observer schema is used:
- It observes the deployment's image, initialized by the given manifest file.
- It observes the deployment's replicas count, initialized by k8s to 1.
- The Service's first port's protocol, initialized in the manifest file, is
*not* observed.
- It accepts between 0 and 2 ports.
- The presence of the ConfigMap is observed.
State (1):
The Deployment image version changed to "1.6".
State (2):
The Deployment replicas count is changed to 2.
State (3):
The Service's first port's protocol is changed to "UDP"
State (4):
A second port is added to the Service.
State (5):
A third port is added to the Service.
State (6):
All ports are removed from the Service.
State (7):
The ConfigMap is deleted
For each state, it is tested if the Kubernetes Observer detects the update and calls
the ``on_res_update`` method.
"""
routes = web.RouteTableDef()
# When the Observer observes the application, it queries the k8s API to get the
# current state of each of the application resources
@routes.get("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
async def _(request):
nonlocal actual_state
if actual_state == 0 or actual_state in range(3, 8):
# The Deployment has not been modified on the cluster
return web.json_response(deployment_response)
updated_deployment_response = deepcopy(deployment_response)
if actual_state == 1:
# State (1): The Deployment image version changed to "1.6".
first_container = get_first_container(updated_deployment_response)
first_container["image"] = "nginx:1.6"
elif actual_state == 2:
# State (2): The Deployment replicas count is changed to 2.
updated_deployment_response["spec"]["replicas"] = 2
return web.json_response(updated_deployment_response)
@routes.get("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(request):
nonlocal actual_state
if actual_state in (0, 1, 2, 7):
# The Service has not been modified on the cluster
return web.json_response(service_response)
updated_service_response = deepcopy(service_response)
if actual_state == 3:
# State (3): The Service's first port's protocol is changed to "UDP"
updated_service_response["spec"]["ports"][0]["protocol"] = "UDP"
elif actual_state == 4:
# State (4): A second port is added to the Service.
updated_service_response["spec"]["ports"].append(
{"nodePort": 32567, "port": 81, "protocol": "TCP", "targetPort": 81}
)
elif actual_state == 5:
# State (5): A third port is added to the Service.
updated_service_response["spec"]["ports"].append(
{"nodePort": 32567, "port": 81, "protocol": "TCP", "targetPort": 81}
)
updated_service_response["spec"]["ports"].append(
{"nodePort": 32568, "port": 82, "protocol": "TCP", "targetPort": 82}
)
elif actual_state == 6:
# State (6): All ports are removed from the Service.
updated_service_response["spec"]["ports"] = []
return web.json_response(updated_service_response)
@routes.get("/api/v1/namespaces/secondary/configmaps/nginx-demo")
async def _(request):
nonlocal actual_state
if actual_state in range(0, 7):
# The ConfigMap has not been modified on the cluster
return web.json_response(configmap_response)
elif actual_state == 7:
# State (7): The ConfigMap is deleted
return web.Response(status=404)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
cluster = ClusterFactory(spec__kubeconfig=make_kubeconfig(kubernetes_server))
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=resource_ref(cluster),
spec__manifest=nginx_manifest,
status__mangled_observer_schema=mangled_observer_schema,
status__last_observed_manifest=initial_last_observed_manifest,
)
calls_to_res_update = 0
async def on_res_update(resource):
assert resource.metadata.name == app.metadata.name
nonlocal calls_to_res_update, actual_state
calls_to_res_update += 1
manifests = resource.status.last_observed_manifest
ports_length = manifests[1]["spec"]["ports"][-1][
"observer_schema_list_current_length"
]
if actual_state == 0:
# As no changes are noticed by the Observer, the res_update function will
# not be called.
assert False
elif actual_state == 1:
# State (1): The Deployment image version changed to "1.6"
status_image = get_first_container(manifests[0])["image"]
assert status_image == "nginx:1.6"
# Three resources are observed
assert len(manifests) == 3
elif actual_state == 2:
# State (2): The Deployment replicas count is changed to 2.
assert manifests[0]["spec"]["replicas"] == 2
elif actual_state == 3:
# State (3): The Service's first port's protocol is changed to "UDP"
# As this field is *not* observed, the ``on_res_update`` method shouldn't be
# called.
assert False
elif actual_state == 4:
# State (4): A second port is added to the Service.
# Check the current length of the list of ports according to the Observer.
assert ports_length == 2
elif actual_state == 5:
# State (5): A third port is added to the Service.
# Check the current length of the list of ports according to the Observer.
assert ports_length == 3
elif actual_state == 6:
# State (6): All ports are removed from the Service.
# Check the current length of the list of ports according to the Observer.
assert ports_length == 0
elif actual_state == 7:
# State (7): The ConfigMap is deleted
assert len(manifests) == 2
observer = KubernetesObserver(cluster, app, on_res_update, time_step=-1)
# Observe an unmodified resource
# As no changes are noticed by the Observer, the res_update function will not be
# called.
actual_state = 0
await observer.observe_resource()
assert calls_to_res_update == 0
# Modify the actual resource "externally"
actual_state = 1
await observer.observe_resource()
assert calls_to_res_update == 1
# State (2): The Deployment replicas count is changed to 2.
actual_state = 2
await observer.observe_resource()
assert calls_to_res_update == 2
# State (3): The Service's first port's protocol is changed to "UDP"
# As this field is *not* observed, the ``on_res_update`` method shouldn't be called
actual_state = 3
await observer.observe_resource()
assert calls_to_res_update == 2
# State (4): A second port is added to the Service.
actual_state = 4
await observer.observe_resource()
assert calls_to_res_update == 3
# State (5): A third port is added to the Service.
actual_state = 5
await observer.observe_resource()
assert calls_to_res_update == 4
# State (6): All ports are removed from the Service.
actual_state = 6
await observer.observe_resource()
assert calls_to_res_update == 5
# State (7): The ConfigMap is deleted
actual_state = 7
await observer.observe_resource()
assert calls_to_res_update == 6
def set_default_namespace(response):
"""Creates a copy of the given Kubernetes API response, where the namespaces have
been reset to the default one.
Args:
response (dict): the response to modify.
Returns:
dict: a copy of the original response, with the namespaces updated.
"""
copy = deepcopy(response)
default_namespace = "default"
original_namespace = copy["metadata"]["namespace"]
new_self_link = copy["metadata"]["selfLink"].replace(
original_namespace, default_namespace
)
copy["metadata"]["selfLink"] = new_self_link
copy["metadata"]["namespace"] = default_namespace
return copy
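# Example usage of the helper above (hypothetical values, for illustration):
#
#     resp = {"metadata": {
#         "namespace": "secondary",
#         "selfLink": "/api/v1/namespaces/secondary/services/nginx-demo",
#     }}
#     copy = set_default_namespace(resp)
#     copy["metadata"]["namespace"]  # -> "default"
#     copy["metadata"]["selfLink"]   # -> "/api/v1/namespaces/default/..."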
async def test_observer_on_poll_update_default_namespace(
aiohttp_server, db, config, loop
):
"""Test the Observer's behavior on update of an actual resource which has been
created WITHOUT any namespace, and for which the cluster's kubeconfig also did not
specify any namespace.
State (0):
a Deployment and a Service are present, the Deployment has an nginx
image with version "1.7.9"
State (1):
both resources are still present, but the Deployment image version
changed to "1.6"
State (2):
only the Deployment is present, with the version "1.6"
"""
routes = web.RouteTableDef()
deployment = set_default_namespace(deployment_response)
service = set_default_namespace(service_response)
# Actual resource, with container image and selector changed
updated_app = deepcopy(deployment)
first_container = get_first_container(updated_app)
first_container["image"] = "nginx:1.6"
# Test the observation of changes on values with a CamelCase format
updated_app["spec"]["selector"]["matchLabels"] = {"app": "foo"}
accepted = "default"
called_get = False
called_post = False
@routes.get("/api/v1/namespaces/{namespace}/services/nginx-demo")
async def _(request):
nonlocal called_get
received = request.match_info["namespace"]
assert (
received == accepted
), f"The namespace {received} must not be used by the client."
called_get = True
nonlocal actual_state
if actual_state in (0, 1):
return web.json_response(service)
elif actual_state == 2:
return web.Response(status=404)
@routes.get("/apis/apps/v1/namespaces/{namespace}/deployments/nginx-demo")
async def _(request):
nonlocal called_post
received = request.match_info["namespace"]
assert (
received == accepted
), f"The namespace {received} must not be used by the client."
called_post = True
nonlocal actual_state
if actual_state == 0:
return web.json_response(deployment)
elif actual_state >= 1:
return web.json_response(updated_app)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
cluster = ClusterFactory(spec__kubeconfig=make_kubeconfig(kubernetes_server))
# Create a manifest with resources without any namespace.
copy_nginx_manifest = deepcopy(nginx_manifest)
for resource in copy_nginx_manifest:
del resource["metadata"]["namespace"]
# Adapt namespace in mangled observer schema and last observed manifest
copy_mangled_observer_schema = deepcopy(mangled_observer_schema)
for resource in copy_mangled_observer_schema:
resource["metadata"]["namespace"] = "default"
copy_initial_last_observed_manifest = deepcopy(initial_last_observed_manifest)
for resource in copy_initial_last_observed_manifest:
resource["metadata"]["namespace"] = "default"
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=resource_ref(cluster),
spec__manifest=copy_nginx_manifest,
status__mangled_observer_schema=copy_mangled_observer_schema,
status__last_observed_manifest=copy_initial_last_observed_manifest,
)
calls_to_res_update = 0
async def on_res_update(resource):
assert resource.metadata.name == app.metadata.name
nonlocal calls_to_res_update, actual_state
calls_to_res_update += 1
manifests = resource.status.last_observed_manifest
status_image = get_first_container(manifests[0])["image"]
if actual_state == 0:
# As no changes are noticed by the Observer, the res_update function will
# not be called.
assert False, "The first poll of the observer should not issue an update."
elif actual_state == 1:
assert status_image == "nginx:1.6"
assert len(manifests) == 2
elif actual_state == 2:
assert status_image == "nginx:1.6"
assert len(manifests) == 1
assert manifests[0]["kind"] == "Deployment"
# The spec never changes
spec_image = get_first_container(resource.spec.manifest[0])["image"]
assert spec_image == "nginx:1.7.9"
observer = KubernetesObserver(cluster, app, on_res_update, time_step=-1)
# Observe an unmodified resource
# As no changes are noticed by the Observer, the res_update function will not be
# called.
actual_state = 0
await observer.observe_resource()
assert calls_to_res_update == 0
# State (1): The Deployment image version changed to "1.6"
actual_state = 1
await observer.observe_resource()
assert calls_to_res_update == 1
# State (2): The Service is deleted; only the Deployment is present.
actual_state = 2
await observer.observe_resource()
assert calls_to_res_update == 2
assert called_get and called_post, "Both endpoints were not called at least once."
async def test_observer_on_poll_update_cluster_default_namespace(
aiohttp_server, db, config, loop
):
"""Test the Observer's behavior on update of an actual resource which has been
created WITHOUT any namespace, but where a default namespace has been set in the
Cluster's kubeconfig.
State (0):
a Deployment and a Service are present, the Deployment has an nginx
image with version "1.7.9"
State (1):
both resources are still present, but the Deployment image version
changed to "1.6"
State (2):
only the Deployment is present, with the version "1.6"
"""
routes = web.RouteTableDef()
deployment = set_default_namespace(deployment_response)
service = set_default_namespace(service_response)
# Actual resource, with container image and selector changed
updated_app = deepcopy(deployment)
first_container = get_first_container(updated_app)
first_container["image"] = "nginx:1.6"
# Test the observation of changes on values with a CamelCase format
updated_app["spec"]["selector"]["matchLabels"] = {"app": "foo"}
accepted = "another_namespace"
called_get = False
called_post = False
@routes.get("/api/v1/namespaces/{namespace}/services/nginx-demo")
async def _(request):
nonlocal called_get
received = request.match_info["namespace"]
assert (
received == accepted
), f"The namespace {received} must not be used by the client."
called_get = True
nonlocal actual_state
if actual_state in (0, 1):
return web.json_response(service)
elif actual_state == 2:
return web.Response(status=404)
@routes.get("/apis/apps/v1/namespaces/{namespace}/deployments/nginx-demo")
async def _(request):
nonlocal called_post
received = request.match_info["namespace"]
assert (
received == accepted
), f"The namespace {received} must not be used by the client."
called_post = True
nonlocal actual_state
if actual_state == 0:
return web.json_response(deployment)
elif actual_state >= 1:
return web.json_response(updated_app)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
# Replace the default namespace in the kubeconfig file
kubeconfig = make_kubeconfig(kubernetes_server)
kubeconfig["contexts"][0]["context"]["namespace"] = "another_namespace"
cluster = ClusterFactory(spec__kubeconfig=kubeconfig)
# Create a manifest with resources without any namespace.
copy_nginx_manifest = deepcopy(nginx_manifest)
for resource in copy_nginx_manifest:
del resource["metadata"]["namespace"]
# Adapt namespace in mangled observer schema and last observed manifest
copy_mangled_observer_schema = deepcopy(mangled_observer_schema)
for resource in copy_mangled_observer_schema:
resource["metadata"]["namespace"] = "another_namespace"
copy_initial_last_observed_manifest = deepcopy(initial_last_observed_manifest)
for resource in copy_initial_last_observed_manifest:
resource["metadata"]["namespace"] = "another_namespace"
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=resource_ref(cluster),
spec__manifest=copy_nginx_manifest,
status__last_observed_manifest=copy_initial_last_observed_manifest,
status__mangled_observer_schema=copy_mangled_observer_schema,
)
calls_to_res_update = 0
async def on_res_update(resource):
assert resource.metadata.name == app.metadata.name
nonlocal calls_to_res_update, actual_state
calls_to_res_update += 1
manifests = resource.status.last_observed_manifest
status_image = get_first_container(manifests[0])["image"]
if actual_state == 0:
# As no changes are noticed by the Observer, the res_update function will
# not be called.
assert False, "The first poll of the observer should not issue an update."
elif actual_state == 1:
assert status_image == "nginx:1.6"
assert len(manifests) == 2
elif actual_state == 2:
assert status_image == "nginx:1.6"
assert len(manifests) == 1
assert manifests[0]["kind"] == "Deployment"
# The spec never changes
spec_image = get_first_container(resource.spec.manifest[0])["image"]
assert spec_image == "nginx:1.7.9"
observer = KubernetesObserver(cluster, app, on_res_update, time_step=-1)
# Observe an unmodified resource
# As no changes are noticed by the Observer, the res_update function will not be
# called.
actual_state = 0
await observer.observe_resource()
assert calls_to_res_update == 0
# Modify the actual resource "externally"
actual_state = 1
await observer.observe_resource()
assert calls_to_res_update == 1
# Delete the service "externally"
actual_state = 2
await observer.observe_resource()
assert calls_to_res_update == 2
assert called_get and called_post, "Both endpoints were not called at least once."
async def test_observer_on_poll_update_manifest_namespace_set(
aiohttp_server, db, config, loop
):
"""Test the Observer's behavior on update of an actual resource which has been
created with a defined namespace, but where a default namespace has been set in the
Cluster's kubeconfig. The manifest file's namespace should be used.
State (0):
a Deployment and a Service are present, the Deployment has an nginx
image with version "1.7.9"
State (1):
both resources are still present, but the Deployment image version
changed to "1.6"
State (2):
only the Deployment is present, with the version "1.6"
"""
routes = web.RouteTableDef()
deployment = set_default_namespace(deployment_response)
service = set_default_namespace(service_response)
# Actual resource, with container image and selector changed
updated_app = deepcopy(deployment)
first_container = get_first_container(updated_app)
first_container["image"] = "nginx:1.6"
# Test the observation of changes on values with a CamelCase format
updated_app["spec"]["selector"]["matchLabels"] = {"app": "foo"}
accepted = "secondary"
called_get = False
called_post = False
@routes.get("/api/v1/namespaces/{namespace}/services/nginx-demo")
async def _(request):
nonlocal called_get
received = request.match_info["namespace"]
assert (
received == accepted
), f"The namespace {received} must not be used by the client."
called_get = True
nonlocal actual_state
if actual_state in (0, 1):
return web.json_response(service)
elif actual_state == 2:
return web.Response(status=404)
@routes.get("/apis/apps/v1/namespaces/{namespace}/deployments/nginx-demo")
async def _(request):
nonlocal called_post
received = request.match_info["namespace"]
assert (
received == accepted
), f"The namespace {received} must not be used by the client."
called_post = True
nonlocal actual_state
if actual_state == 0:
return web.json_response(deployment)
elif actual_state >= 1:
return web.json_response(updated_app)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
# Replace the default namespace in the kubeconfig file
kubeconfig = make_kubeconfig(kubernetes_server)
kubeconfig["contexts"][0]["context"]["namespace"] = "another_namespace"
cluster = ClusterFactory(spec__kubeconfig=kubeconfig)
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=resource_ref(cluster),
spec__manifest=nginx_manifest,
status__last_observed_manifest=initial_last_observed_manifest,
)
calls_to_res_update = 0
async def on_res_update(resource):
assert resource.metadata.name == app.metadata.name
nonlocal calls_to_res_update, actual_state
calls_to_res_update += 1
manifests = resource.status.last_observed_manifest
status_image = get_first_container(manifests[0])["image"]
if actual_state == 0:
# As no changes are noticed by the Observer, the res_update function will
# not be called.
assert False, "The first poll of the observer should not issue an update."
elif actual_state == 1:
assert status_image == "nginx:1.6"
assert len(manifests) == 2
elif actual_state == 2:
assert status_image == "nginx:1.6"
assert len(manifests) == 1
assert manifests[0]["kind"] == "Deployment"
# The spec never changes
spec_image = get_first_container(resource.spec.manifest[0])["image"]
assert spec_image == "nginx:1.7.9"
kube = KubernetesClient(cluster.spec.kubeconfig)
generate_default_observer_schema(app, kube.default_namespace)
observer = KubernetesObserver(cluster, app, on_res_update, time_step=-1)
# Observe an unmodified resource
# As no changes are noticed by the Observer, the res_update function will not be
# called.
actual_state = 0
await observer.observe_resource()
assert calls_to_res_update == 0
# Modify the actual resource "externally"
actual_state = 1
await observer.observe_resource()
assert calls_to_res_update == 1
# Delete the service "externally"
actual_state = 2
await observer.observe_resource()
assert calls_to_res_update == 2
assert called_get and called_post, "Both endpoints were not called at least once."
async def test_observer_on_status_update(aiohttp_server, db, config, loop):
"""Test the behavior of the ``on_status_update`` method of the Kubernetes Controller
The status of the k8s resource changed on the cluster:
- The container image has changed to "1.6". This field is observed.
- The replicas count has changed to 2. This field is observed, though not
initialized by the manifest file
- The Service's first port's protocol has changed to "UDP". Though initialized by
the manifest file, this field is not observed.
- A second port is added to the Service.
This test ensures that the Krake resource's status is updated accordingly.
"""
routes = web.RouteTableDef()
updated_service_response = deepcopy(service_response)
updated_deployment_response = deepcopy(deployment_response)
@routes.get("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(request):
# The Service's first port's protocol is changed to "UDP"
updated_service_response["spec"]["ports"][0]["protocol"] = "UDP"
# A second port is added to the Service.
updated_service_response["spec"]["ports"].append(
{"nodePort": 32567, "port": 81, "protocol": "TCP", "targetPort": 81}
)
return web.json_response(updated_service_response)
@routes.get("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
async def _(request):
first_container = get_first_container(updated_deployment_response)
first_container["image"] = "nginx:1.6"
updated_deployment_response["spec"]["replicas"] = 2
return web.json_response(updated_deployment_response)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
cluster = ClusterFactory(spec__kubeconfig=make_kubeconfig(kubernetes_server))
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=resource_ref(cluster),
status__mangled_observer_schema=mangled_observer_schema,
status__last_observed_manifest=initial_last_observed_manifest,
spec__manifest=nginx_manifest,
)
await db.put(cluster)
await db.put(app)
server = await aiohttp_server(create_app(config))
async with Client(url=server_endpoint(server), loop=loop) as client:
controller = KubernetesController(server_endpoint(server), worker_count=0)
await controller.prepare(client)
observer = KubernetesObserver(
cluster, app, controller.on_status_update, time_step=-1
)
await observer.observe_resource()
updated = await db.get(
Application, namespace=app.metadata.namespace, name=app.metadata.name
)
# The spec, the last_applied_manifest and the created timestamp didn't change
assert updated.spec == app.spec
assert updated.status.last_applied_manifest == app.status.last_applied_manifest
assert updated.metadata.created == app.metadata.created
# The last_observed_manifest has been updated for the observed fields only
last_observed_manifest = updated.status.last_observed_manifest
first_container = get_first_container(last_observed_manifest[0])
assert first_container["image"] == "nginx:1.6"
assert last_observed_manifest[0]["spec"]["replicas"] == 2
ports_length = last_observed_manifest[1]["spec"]["ports"][-1][
"observer_schema_list_current_length"
]
assert ports_length == 2
# Protocol of first port is not observed
assert "protocol" not in last_observed_manifest[1]["spec"]["ports"][0]
# ConfigMap is not observed
assert len(last_observed_manifest) == 2
assert last_observed_manifest[0]["kind"] == "Deployment"
assert last_observed_manifest[1]["kind"] == "Service"
async def test_observer_on_status_update_mangled(
aiohttp_server, db, config, loop, hooks_config
):
"""Test the ``on_status_update`` method of the Kubernetes Controller in case of
an Application mangled with the "complete" hook.
State (0):
the Application is created, the hook is added.
State (1):
the Kubernetes resources are not changed and the Observer is called. It should
not trigger an update of the application
State (2):
the Kubernetes resources are changed and the Observer is called. It should
trigger an update of the application
"""
routes = web.RouteTableDef()
actual_state = 0
deploy_mangled_response = deepcopy(deployment_response)
@routes.post("/apis/apps/v1/namespaces/secondary/deployments")
async def _(request):
nonlocal deploy_mangled_response
rd = await request.read()
app = json.loads(rd)
app_first_container = get_first_container(app)
resp_first_container = get_first_container(deploy_mangled_response)
resp_first_container["env"] = app_first_container["env"]
return web.json_response(deploy_mangled_response)
@routes.get("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
async def _(request):
nonlocal actual_state
if actual_state == 0:
return web.Response(status=404) # needed for controller.resource_received
if actual_state == 1:
return web.json_response(deploy_mangled_response)
if actual_state == 2:
updated_deployment_response = deepcopy(deploy_mangled_response)
first_container = get_first_container(updated_deployment_response)
first_container["image"] = "nginx:1.6"
return web.json_response(updated_deployment_response)
@routes.post("/api/v1/namespaces/secondary/configmaps")
async def _(request):
return web.json_response(configmap_response)
@routes.post("/api/v1/namespaces/secondary/services")
async def _(request):
return web.json_response(service_response)
@routes.get("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(request):
nonlocal actual_state
if actual_state == 0:
return web.Response(status=404)
elif actual_state >= 1:
return web.json_response(service_response)
@routes.get("/api/v1/namespaces/secondary/configmaps/nginx-demo")
async def _(request):
return web.json_response(configmap_response)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
cluster = ClusterFactory(spec__kubeconfig=make_kubeconfig(kubernetes_server))
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=resource_ref(cluster),
status__scheduled_to=resource_ref(cluster),
spec__manifest=nginx_manifest,
spec__observer_schema=custom_observer_schema,
status__last_observed_manifest=initial_last_observed_manifest,
spec__hooks=["complete"],
)
await db.put(cluster)
await db.put(app)
server = await aiohttp_server(create_app(config))
calls_to_res_update = 0
def update_decorator(func):
async def on_res_update(resource):
nonlocal calls_to_res_update, actual_state
calls_to_res_update += 1
if actual_state == 1:
# Ensure that the Observer is not notifying the Controller
assert False
await func(resource)
return on_res_update
async with Client(url=server_endpoint(server), loop=loop) as client:
generate_default_observer_schema(app)
controller = KubernetesController(
server_endpoint(server), worker_count=0, hooks=hooks_config
)
controller.on_status_update = update_decorator(controller.on_status_update)
await controller.prepare(client)
await controller.resource_received(app, start_observer=False)
# Remove from dict to prevent cancellation in KubernetesController.stop_observer
observer, _ = controller.observers.pop(app.metadata.uid)
assert "env" in get_first_container(
observer.resource.status.mangled_observer_schema[0]
)
actual_state = 1
# The observer should not call on_res_update
await observer.observe_resource()
assert calls_to_res_update == 0
actual_state = 2
await observer.observe_resource()
assert calls_to_res_update == 1
updated = await db.get(
Application, namespace=app.metadata.namespace, name=app.metadata.name
)
assert updated.spec.manifest == app.spec.manifest
# Check that the hook is present and observed in the stored Application
assert "env" in get_first_container(updated.status.last_observed_manifest[0])
assert "env" in get_first_container(updated.status.mangled_observer_schema[0])
assert updated.metadata.created == app.metadata.created
# Check update of observed image
first_container = get_first_container(updated.status.last_observed_manifest[0])
assert first_container["image"] == "nginx:1.6"
async def check_observer_does_not_update(observer, app, db):
"""Ensure that the given observer is up-to-date with the Application on the API.
Args:
observer (KubernetesObserver): the observer to check.
app (Application): the Application that the observer has to monitor. Used just
for its references (name and namespace).
db (krake.api.database.Session): the database session to access the API data.
Returns:
Application: the latest version of the Application on the API.
"""
before = await db.get(
Application, namespace=app.metadata.namespace, name=app.metadata.name
)
await observer.observe_resource()
after = await db.get(
Application, namespace=app.metadata.namespace, name=app.metadata.name
)
assert after == before
return after
async def test_observer_on_api_update(aiohttp_server, config, db, loop):
"""Test the connectivity between the Controller and Observer on update of a
resource by the API.
In this test, the resource is updated from the Krake API. The Observer should not
take any actions.
State (0):
a Deployment and a Service are present with standard observer schema. The
Deployment has an nginx image with version "1.7.9"
State (1):
both resources are still present, but the API changed the Deployment image
version to "1.6".
State (2):
the Service is deleted by the API and removed from the observer schema. Only
the Deployment is present, with the version "1.6"
"""
routes = web.RouteTableDef()
actual_state = 0
@routes.get("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(request):
nonlocal actual_state
if actual_state in (0, 1):
return web.json_response(service_response)
elif actual_state == 2:
return web.Response(status=404)
updated_deployment_response = deepcopy(deployment_response)
first_container = get_first_container(updated_deployment_response)
first_container["image"] = "nginx:1.6"
@routes.get("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
async def _(request):
nonlocal actual_state
if actual_state == 0:
return web.json_response(deployment_response)
elif actual_state >= 1:
return web.json_response(updated_deployment_response)
@routes.patch("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
async def _(request):
nonlocal actual_state
if actual_state == 0:
return web.json_response(deployment_response)
elif actual_state >= 1:
return web.json_response(updated_deployment_response)
@routes.patch("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(request):
assert actual_state in (0, 2)
return web.json_response(service_response)
@routes.delete("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(request):
nonlocal actual_state
assert actual_state == 2
return web.Response(status=200)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
cluster = ClusterFactory(spec__kubeconfig=make_kubeconfig(kubernetes_server))
cluster_ref = resource_ref(cluster)
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=cluster_ref,
status__scheduled_to=cluster_ref,
spec__observer_schema=[
custom_deployment_observer_schema,
custom_service_observer_schema,
],
status__last_observed_manifest=[
initial_last_observed_manifest_deployment,
initial_last_observed_manifest_service,
],
spec__manifest=deepcopy([deployment_manifest, service_manifest]),
metadata__finalizers=["kubernetes_resources_deletion"],
)
await db.put(cluster)
await db.put(app)
server = await aiohttp_server(create_app(config))
def update_decorator(func):
async def on_res_update(resource):
# As the update on resources is performed by the API, the Observer should
# never see a difference on the actual resource, and thus, the current
# function should never be called
assert False
return on_res_update
async def mock():
# When a resource is updated, the task corresponding to the observer is stopped
# automatically. This mock is used as a fake task to cancel
await asyncio.sleep(1)
async with Client(url=server_endpoint(server), loop=loop) as client:
controller = KubernetesController(server_endpoint(server), worker_count=0)
controller.on_status_update = update_decorator(controller.on_status_update)
await controller.prepare(client)
##
# In state 0
##
# Create the actual application; this registers the Observer
# ``start_observer=False`` prevents starting the observer as a background task
await controller.resource_received(app, start_observer=False)
obs, _ = controller.observers[app.metadata.uid]
controller.observers[app.metadata.uid] = (obs, loop.create_task(mock()))
# Observe an unmodified resource
after_0 = await check_observer_does_not_update(obs, app, db)
##
# Modify the image version on the API, and observe --> go into state 1
##
actual_state = 1
# Modify the manifest of the Application
first_container = get_first_container(after_0.spec.manifest[0])
first_container["image"] = "nginx:1.6"
after_0.status.state = ApplicationState.RUNNING
await db.put(after_0)
# Update the actual resource
await controller.resource_received(after_0, start_observer=False)
obs, _ = controller.observers[app.metadata.uid]
controller.observers[app.metadata.uid] = (obs, loop.create_task(mock()))
# Assert the resource on the observer has been updated.
first_container = get_first_container(obs.resource.spec.manifest[0])
assert first_container["image"] == "nginx:1.6"
# Status should not be updated by observer
after_1 = await check_observer_does_not_update(obs, app, db)
##
# Remove the service on the API, and observe --> go into state 2
##
actual_state = 2
# Modify the manifest and the observer_schema of the Application
after_1.spec.manifest = after_1.spec.manifest[:1]
after_1.spec.observer_schema = after_1.spec.observer_schema[:1]
after_1.status.state = ApplicationState.RUNNING
await db.put(after_1)
# Update the actual resource
await controller.resource_received(after_1, start_observer=False)
obs, _ = controller.observers[app.metadata.uid]
controller.observers[app.metadata.uid] = (obs, loop.create_task(mock()))
# Status should not be updated by observer
await check_observer_does_not_update(obs, app, db)
async def test_observer_on_delete(aiohttp_server, config, db, loop):
"""Test the behavior of the Kubernetes Controller and Observer when an application
is being deleted.
"""
routes = web.RouteTableDef()
@routes.get("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(request):
return web.json_response(service_response)
@routes.get("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
async def _(request):
return web.json_response(deployment_response)
@routes.get("/api/v1/namespaces/secondary/configmaps/nginx-demo")
async def _(request):
return web.json_response(configmap_response)
@routes.delete("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
async def _(request):
return web.Response(status=200)
@routes.delete("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(request):
return web.Response(status=200)
@routes.delete("/api/v1/namespaces/secondary/configmaps/nginx-demo")
async def _(request):
return web.Response(status=200)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
cluster = ClusterFactory(spec__kubeconfig=make_kubeconfig(kubernetes_server))
app = ApplicationFactory(
metadata__deleted=fake.date_time(),
status__state=ApplicationState.RUNNING,
status__mangled_observer_schema=mangled_observer_schema,
status__last_observed_manifest=initial_last_observed_manifest,
status__running_on=resource_ref(cluster),
spec__manifest=nginx_manifest,
metadata__finalizers=["kubernetes_resources_deletion"],
)
await db.put(cluster)
await db.put(app)
server = await aiohttp_server(create_app(config))
async with Client(url=server_endpoint(server), loop=loop) as client:
controller = KubernetesController(
server_endpoint(server), worker_count=0, time_step=100
)
await controller.prepare(client)
# Start the observer, which will not observe due to the long time step
await register_observer(controller, app)
observer, _ = controller.observers[app.metadata.uid]
# Observe a resource actually in deletion.
before = await db.get(
Application, namespace=app.metadata.namespace, name=app.metadata.name
)
await observer.observe_resource()
after = await db.get(
Application, namespace=app.metadata.namespace, name=app.metadata.name
)
assert after == before
# Clean the application resources
await controller.resource_received(app)
# The observer task should be cancelled
assert app.metadata.uid not in controller.observers
@pytest.mark.slow
async def test_observer_creation_deletion(aiohttp_server, config, db, loop):
"""Test the creation and cleanup of the observers when Applications are received by
the reflector.
"""
routes = web.RouteTableDef()
@routes.get("/api/v1/namespaces/secondary/services/nginx-demo")
async def _(_):
return web.json_response(service_response)
@routes.get("/apis/apps/v1/namespaces/secondary/deployments/nginx-demo")
async def _(_):
return web.json_response(deployment_response)
kubernetes_app = web.Application()
kubernetes_app.add_routes(routes)
kubernetes_server = await aiohttp_server(kubernetes_app)
cluster = ClusterFactory(spec__kubeconfig=make_kubeconfig(kubernetes_server))
scheduled_apps = [
ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__mangled_observer_schema=mangled_observer_schema,
status__last_observed_manifest=initial_last_observed_manifest,
status__running_on=resource_ref(cluster),
spec__manifest=nginx_manifest,
metadata__finalizers=["kubernetes_resources_deletion"],
)
for _ in range(2)
]
await db.put(cluster)
for scheduled in scheduled_apps:
await db.put(scheduled)
server = await aiohttp_server(create_app(config))
controller = KubernetesController(
server_endpoint(server), worker_count=0, time_step=1
)
run_task = None
try:
run_task = loop.create_task(controller.run())
# Wait for the observers to poll their resource.
await asyncio.sleep(3)
assert len(controller.observers) == 2
finally:
# Trigger the cleanup
if run_task is not None:
run_task.cancel()
with suppress(asyncio.CancelledError):
await run_task
assert len(controller.observers) == 0
def test_update_last_applied_manifest_from_spec():
"""Test the ``update_last_applied_manifest_from_spec`` function.
An application containing a Deployment is created. The default observer schema is
used.
State (0):
`last_applied_manifest` is empty. It should be initialized to spec.manifest
State (1):
The Deployment's manifest file specifies a value for the previously unset
`revisionHistoryLimit` and `progressDeadlineSeconds`. Only the first one is
observed.
State (2):
The Deployment's manifest file specifies a new value for the previously set
`revisionHistoryLimit` and `progressDeadlineSeconds`.
State (3):
The Deployment's manifest doesn't specify the previously set
`revisionHistoryLimit` and `progressDeadlineSeconds`.
"""
app = ApplicationFactory(spec__manifest=deepcopy([deployment_manifest]))
# State (0): `last_applied_manifest` is empty. It should be initialized to
# spec.manifest
generate_default_observer_schema(app)
update_last_applied_manifest_from_spec(app)
assert app.status.last_applied_manifest == app.spec.manifest
# State (1): The Deployment's manifest file specifies a value for the previously
# unset `revisionHistoryLimit` and `progressDeadlineSeconds`. Only the first one is
# observed.
app.status.mangled_observer_schema[0]["spec"]["revisionHistoryLimit"] = None
app.spec.manifest[0]["spec"]["revisionHistoryLimit"] = 20
app.spec.manifest[0]["spec"]["progressDeadlineSeconds"] = 300
# Both values should be initialized
update_last_applied_manifest_from_spec(app)
assert app.status.last_applied_manifest[0]["spec"]["revisionHistoryLimit"] == 20
assert app.status.last_applied_manifest[0]["spec"]["progressDeadlineSeconds"] == 300
# State (2): The Deployment's manifest file specifies a new value for previously
# set `revisionHistoryLimit` and `progressDeadlineSeconds`.
app.spec.manifest[0]["spec"]["revisionHistoryLimit"] = 40
app.spec.manifest[0]["spec"]["progressDeadlineSeconds"] = 600
# Both values should be updated
update_last_applied_manifest_from_spec(app)
assert app.status.last_applied_manifest[0]["spec"]["revisionHistoryLimit"] == 40
assert app.status.last_applied_manifest[0]["spec"]["progressDeadlineSeconds"] == 600
# State (3): The Deployment's manifest doesn't specify previously set
# `revisionHistoryLimit` and `progressDeadlineSeconds`.
app.spec.manifest[0]["spec"].pop("revisionHistoryLimit")
app.spec.manifest[0]["spec"].pop("progressDeadlineSeconds")
# Only the observed field should be kept.
update_last_applied_manifest_from_spec(app)
assert app.status.last_applied_manifest[0]["spec"]["revisionHistoryLimit"] == 40
assert "progressDeadlineSeconds" not in app.status.last_applied_manifest[0]["spec"]
def test_update_last_applied_manifest_from_resp(loop):
"""Test the ``update_last_applied_manifest_from_resp`` function.
This function is called to update ``status.last_applied_manifest`` from a
Kubernetes response. Only observed fields which are not yet initialized should be
created by this function.
State (0):
A Deployment and a Service are present, the Deployment has an nginx image with
version "1.7.9". The Service defines 1 port using the "TCP" protocol. A custom
observer schema is used:
- It observes the Deployment's image, initialized by the given manifest file.
- It observes the Deployment's replicas count, initialized by k8s to 1.
- The Service's first port's protocol, initialized in the manifest file, is
*not* observed.
- It accepts between 0 and 2 ports.
State (1):
The Deployment image version changed to "1.6".
State (2):
The Deployment replicas count is changed to 2.
State (3):
The Service's first port's protocol is changed to "UDP"
State (4):
A second port is added to the Service.
State (5):
A third port is added to the Service.
State (6):
All ports are removed from the Service.
"""
cluster = ClusterFactory()
# Create an application using a custom observer schema
# The last_applied_manifest is initialized with the `spec.manifest`, as in the
# normal workflow (setting mangling aside)
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=resource_ref(cluster),
spec__manifest=[deployment_manifest, service_manifest],
spec__observer_schema=[
custom_deployment_observer_schema,
custom_service_observer_schema,
],
status__last_applied_manifest=[deployment_manifest, service_manifest],
)
generate_default_observer_schema(app)
# Create k8s objects from the k8s response
copy_deployment_response = deepcopy(deployment_response)
copy_service_response = deepcopy(service_response)
deployment_object = serialize_k8s_object(copy_deployment_response, "V1Deployment")
service_object = serialize_k8s_object(copy_service_response, "V1Service")
# State (0): Standard response from the k8s cluster
original_replicas_count = copy_deployment_response["spec"]["replicas"]
# `spec.replicas` is not initialized in `nginx-manifest` but is present in the
# `observer_schema`. It is initialized in the first call to the function.
update_last_applied_manifest_from_resp(app, None, None, deployment_object)
assert (
app.status.last_applied_manifest[0]["spec"]["replicas"]
== original_replicas_count
)
# State (1): Change the Deployment's image in the k8s response
first_container_resp = get_first_container(copy_deployment_response)
first_container_resp["image"] = "nginx:1.6"
deployment_object = serialize_k8s_object(copy_deployment_response, "V1Deployment")
first_container_manifest = get_first_container(nginx_manifest[0])
# As this field is initialized in `nginx-manifest`, its value is not updated.
update_last_applied_manifest_from_resp(app, None, None, deployment_object)
first_container_app = get_first_container(app.status.last_applied_manifest[0])
assert first_container_app["image"] == first_container_manifest["image"]
# State (2): Change the Deployment's replicas count to 2
deployment_object.spec.replicas = 2
# The field is observed and has already been initialized. No new update to
# `last_applied_manifest` should occur from a Kubernetes response.
update_last_applied_manifest_from_resp(app, None, None, deployment_object)
assert (
app.status.last_applied_manifest[0]["spec"]["replicas"]
== original_replicas_count
)
# State (3): Change the Service's first port's protocol to "UDP"
service_object.spec.ports[0].protocol = "UDP"
# The field is not observed and is initialized by `nginx-manifest`. No update should
# occur
update_last_applied_manifest_from_resp(app, None, None, service_object)
assert (
app.status.last_applied_manifest[1]["spec"]["ports"][0]["protocol"]
== nginx_manifest[1]["spec"]["ports"][0]["protocol"]
)
# State (4): A second port is added to the Service.
copy_service_response["spec"]["ports"].append(
{"nodePort": 32567, "port": 81, "protocol": "TCP", "targetPort": 81}
)
service_object = serialize_k8s_object(copy_service_response, "V1Service")
# Only the first port is observed: No update should occur
update_last_applied_manifest_from_resp(app, None, None, service_object)
assert len(app.status.last_applied_manifest[1]["spec"]["ports"]) == 1
# State (5): A third port is added to the Service.
copy_service_response["spec"]["ports"].append(
{"nodePort": 32568, "port": 82, "protocol": "TCP", "targetPort": 82}
)
service_object = serialize_k8s_object(copy_service_response, "V1Service")
# Only the first port is observed: No update should occur
update_last_applied_manifest_from_resp(app, None, None, service_object)
assert len(app.status.last_applied_manifest[1]["spec"]["ports"]) == 1
# State (6): All ports are removed from the Service.
copy_service_response["spec"]["ports"] = []
service_object = serialize_k8s_object(copy_service_response, "V1Service")
# The first port is observed and initialized by `nginx_manifest`. No update should
# occur
update_last_applied_manifest_from_resp(app, None, None, service_object)
assert len(app.status.last_applied_manifest[1]["spec"]["ports"]) == 1
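# Rule of thumb exercised above (a summary, not Krake's exact
# implementation): a value from a Kubernetes response is copied into
# `last_applied_manifest` only when the field is observed *and* has not been
# initialized by the manifest or a previous call; already-initialized fields
# are never overwritten from a response.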
def test_update_last_observed_manifest_from_resp(loop):
"""Test the ``update_last_observed_manifest_from_resp`` function.
This function is called to update ``status.last_observed_manifest`` from a
Kubernetes response. Observed fields only are present and updated.
State (0):
A Deployment and a Service are present, the Deployment has an nginx image with
version "1.7.9". The service defines 1 port using the "TCP" protocol. A custom
observer schema is used:
- It observes the Deployment's image, initialized by the given manifest file.
- It observes the Deployment's replicas count, initialized by k8s to 1.
- The Service's first port's protocol, initialized in the manifest file, is
*not* observed.
- It accepts between 0 and 2 ports.
State (1):
The Deployment image version changed to "1.6".
State (2):
The Deployment replicas count is changed to 2.
State (3):
The Service's first port's protocol is changed to "UDP"
State (4):
A second port is added to the Service.
State (5):
A third port is added to the Service.
State (6):
All ports are removed from the Service.
"""
cluster = ClusterFactory()
# Create an application using a custom observer schema
# The `last_observed_manifest` is left empty, as in the normal workflow, and is
# initialized by the first call to `update_last_observed_manifest_from_resp`
app = ApplicationFactory(
status__state=ApplicationState.RUNNING,
status__running_on=resource_ref(cluster),
spec__manifest=nginx_manifest,
spec__observer_schema=custom_observer_schema,
)
generate_default_observer_schema(app)
# Create k8s object from the k8s response
copy_deployment_response = deepcopy(deployment_response)
copy_service_response = deepcopy(service_response)
deployment_object = serialize_k8s_object(copy_deployment_response, "V1Deployment")
service_object = serialize_k8s_object(copy_service_response, "V1Service")
# State (0): Standard response from the k8s cluster, while last_observed_manifest is
# empty
# Update the Deployment observed manifest from the standard response.
update_last_observed_manifest_from_resp(app, None, None, deployment_object)
assert app.status.last_observed_manifest[0] == initial_last_observed_manifest[0]
# Update the Service observed manifest from the standard response.
update_last_observed_manifest_from_resp(app, None, None, service_object)
assert app.status.last_observed_manifest[1] == initial_last_observed_manifest[1]
# State (1): Change the Deployment's image in the k8s response
first_container_resp = get_first_container(copy_deployment_response)
first_container_resp["image"] = "nginx:1.6"
deployment_object = serialize_k8s_object(copy_deployment_response, "V1Deployment")
# This field is observed, therefore it should be updated in `last_observed_manifest`
update_last_observed_manifest_from_resp(app, None, None, deployment_object)
first_container_app = get_first_container(app.status.last_observed_manifest[0])
assert first_container_app["image"] == first_container_resp["image"]
# State (2): Change the Deployment's replicas count to 2
deployment_object.spec.replicas = 2
# This field is observed, therefore it should be updated in `last_observed_manifest`
update_last_observed_manifest_from_resp(app, None, None, deployment_object)
assert (
app.status.last_observed_manifest[0]["spec"]["replicas"]
== deployment_object.spec.replicas
)
# State (3): Change the Service's first port's protocol to "UDP"
service_object.spec.ports[0].protocol = "UDP"
# The field is not observed, therefore it shouldn't be present in
# `last_observed_manifest`
update_last_observed_manifest_from_resp(app, None, None, service_object)
assert "protocol" not in app.status.last_observed_manifest[1]["spec"]["ports"][0]
# State (4): A second port is added to the Service.
copy_service_response["spec"]["ports"].append(
{"nodePort": 32567, "port": 81, "protocol": "TCP", "targetPort": 81}
)
service_object = serialize_k8s_object(copy_service_response, "V1Service")
# The length of the list should be updated in `last_observed_manifest`
update_last_observed_manifest_from_resp(app, None, None, service_object)
assert (
app.status.last_observed_manifest[1]["spec"]["ports"][-1][
"observer_schema_list_current_length"
]
== 2
)
# State (5): A third port is added to the Service.
copy_service_response["spec"]["ports"].append(
{"nodePort": 32568, "port": 82, "protocol": "TCP", "targetPort": 82}
)
service_object = serialize_k8s_object(copy_service_response, "V1Service")
# The length of the list should be updated in `last_observed_manifest`
update_last_observed_manifest_from_resp(app, None, None, service_object)
assert (
app.status.last_observed_manifest[1]["spec"]["ports"][-1][
"observer_schema_list_current_length"
]
== 3
)
# State (6): All ports are removed from the Service.
service_object.spec.ports = []
# The length of the list should be updated in `last_observed_manifest`
# Also, the first port should not be present in `last_observed_manifest` anymore.
# The list of ports only contains the special control dictionary
update_last_observed_manifest_from_resp(app, None, None, service_object)
assert (
app.status.last_observed_manifest[1]["spec"]["ports"][-1][
"observer_schema_list_current_length"
]
== 0
)
assert len(app.status.last_observed_manifest[1]["spec"]["ports"]) == 1
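# For illustration: after state (6), the observed list of ports is reduced to
# the special control dictionary alone, i.e. roughly
#     [{"observer_schema_list_current_length": 0}]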
| 38.656095
| 88
| 0.694831
| 7,788
| 62,159
| 5.330508
| 0.051233
| 0.025702
| 0.038059
| 0.017199
| 0.814231
| 0.781809
| 0.753963
| 0.724478
| 0.704365
| 0.667943
| 0
| 0.013413
| 0.2228
| 62,159
| 1,607
| 89
| 38.680149
| 0.845912
| 0.191734
| 0
| 0.718478
| 0
| 0
| 0.100292
| 0.043862
| 0
| 0
| 0
| 0
| 0.122826
| 1
| 0.006522
| false
| 0
| 0.018478
| 0
| 0.078261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ae0aa85d8b51e2b02e94e1265d72cbd0807f5d92
| 9,779
|
py
|
Python
|
eisy/test/test_circuits.py
|
EISy-as-Py/EISy-as-Py
|
3086ecd043fce4d8ba49ec55004340a5444c0eb0
|
[
"MIT"
] | 5
|
2020-02-06T21:38:47.000Z
|
2020-02-13T20:29:44.000Z
|
eisy/test/test_circuits.py
|
EISy-as-Py/EISy-as-Py
|
3086ecd043fce4d8ba49ec55004340a5444c0eb0
|
[
"MIT"
] | 2
|
2020-03-11T22:06:21.000Z
|
2020-05-18T17:22:43.000Z
|
eisy/test/test_circuits.py
|
EISy-as-Py/EISy_as_Py
|
3086ecd043fce4d8ba49ec55004340a5444c0eb0
|
[
"MIT"
] | 4
|
2020-03-13T20:35:04.000Z
|
2020-03-18T21:56:28.000Z
|
import numpy as np
import unittest
import eisy.simulation.circuits as circuits
# Define the common variables to be used as a testing dataset
high_freq = 10**8 # Hz
low_freq = 0.01 # Hz
decades = 10
Resistance = 10
Parallel_Resistance = 100
Capacitance = 10**-6
Constant_phase_element = 10**-6
alpha = 1
sigma = 500
f_range = circuits.freq_gen(high_freq, low_freq, decades)
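# Note, inferred from the usage below: freq_gen appears to return a tuple
# where f_range[0] is the frequency range [Hz] and f_range[1] the
# corresponding angular frequencies; see eisy.simulation.circuits for the
# actual contract.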
class TestSimulationTools(unittest.TestCase):
def test_freq_gen(self):
f_range = circuits.freq_gen(high_freq, low_freq, decades)
assert isinstance(decades, int),\
'the number of decades should be an integer'
assert high_freq >= low_freq,\
'the low frequency should be smaller than the high ' +\
'frequency limit value. Check again.'
assert max(f_range[0])-min(f_range[0]) == high_freq - low_freq, \
'The frequency range returned is invalid'
assert len(f_range[0]) == len(f_range[1]), \
'The output returned is invalid'
def test_RC_parallel(self):
response = circuits.cir_RC_parallel(f_range[1], R=Resistance,
C=Capacitance)
assert Resistance > 0, \
'The input resistance is invalid'
assert Capacitance > 0, \
'The input capacitance is invalid'
assert isinstance(Capacitance, float), \
'the capacitance should be a float, not an integer'
assert len(response) == len(f_range[1]), \
'The returned response is not valid'
for item in response:
assert isinstance(item, complex),\
'The returned response includes invalid impedance'
def test_RC_series(self):
response = circuits.cir_RC_series(f_range[1], R=Resistance,
C=Capacitance)
assert Resistance > 0, \
'The input resistance is invalid'
assert Capacitance > 0, \
'The input capacitance is invalid'
assert isinstance(Capacitance, float), \
'the capacitance should be a float, not an integer'
assert len(response) == len(f_range[1]), \
'The returned response is not valid'
for item in response:
assert isinstance(item, complex),\
'The returned response includes invalid impedance'
def test_RQ_parallel(self):
response = circuits.cir_RQ_parallel(f_range[1], R=Resistance,
Q=Constant_phase_element,
alpha=alpha)
assert Resistance > 0, \
'The input resistance is invalid'
assert Constant_phase_element > 0, \
'The input phase element is invalid'
assert len(response) == len(f_range[1]), \
'The returned response is not valid'
for item in response:
assert isinstance(item, complex), \
'The returned response includes invalid impedance'
def test_RQ_series(self):
response = circuits.cir_RQ_series(f_range[1], R=Resistance,
Q=Constant_phase_element,
alpha=alpha)
assert Resistance > 0, \
'The input resistance is invalid'
assert Constant_phase_element > 0, \
'The input phase element is invalid'
assert len(response) == len(f_range[1]), \
'The returned response is not valid'
for item in response:
assert isinstance(item, complex), \
'The returned response includes invalid impedance'
def test_RsRC(self):
response = circuits.cir_RsRC(f_range[1], Rs=Resistance,
Rp=Parallel_Resistance,
C=Capacitance)
assert Resistance > 0, \
'The input resistance is invalid'
assert Parallel_Resistance > 0, \
'The input resistance is invalid'
assert Capacitance > 0, \
'The input capacitance is invalid'
assert len(response) == len(f_range[1]), \
'The returned response is not valid'
for item in response:
assert isinstance(item, complex), \
'The returned response includes invalid impedance'
real_Z = item.real
imag_Z = item.imag
total_Z = np.sqrt((real_Z**2) + (imag_Z**2))
assert total_Z > Resistance,\
'The Impedance value returned is lower than the ' +\
'Solution Resistance'
def test_RsRQ(self):
response = circuits.cir_RsRQ(f_range[1], Rs=Resistance,
Rp=Parallel_Resistance,
Q=Constant_phase_element,
alpha=alpha)
assert Resistance > 0, \
'The input resistance is invalid'
assert Parallel_Resistance > 0, \
'The input resistance is invalid'
assert Constant_phase_element > 0, \
'The input phase element is invalid'
assert len(response) == len(f_range[1]), \
'The returned response is not valid'
for item in response:
assert isinstance(item, complex), \
'The returned response includes invalid impedance'
real_Z = item.real
imag_Z = item.imag
total_Z = np.sqrt((real_Z**2) + (imag_Z**2))
            assert total_Z > Resistance, \
                'The impedance value returned is lower than the ' +\
                'solution resistance'
def test_RsRQRQ(self):
response = circuits.cir_RsRQRQ(f_range[1], Rs=Resistance,
Rp1=Parallel_Resistance,
Q1=Constant_phase_element,
alpha1=alpha,
Rp2=Parallel_Resistance,
Q2=Constant_phase_element,
alpha2=alpha)
        assert Resistance > 0, \
            'The input series resistance is invalid'
        assert Parallel_Resistance > 0, \
            'The input parallel resistance is invalid'
        assert Constant_phase_element > 0, \
            'The input constant phase element is invalid'
        assert 0 < alpha <= 1, 'The value of alpha is invalid'
assert len(response) == len(f_range[1]), \
'The returned response is not valid'
for item in response:
assert isinstance(item, complex), \
'The returned response includes invalid impedance'
real_Z = item.real
imag_Z = item.imag
total_Z = np.sqrt((real_Z**2) + (imag_Z**2))
            assert total_Z > Resistance, \
                'The impedance value returned is lower than the ' +\
                'solution resistance'
def test_RsRCRC(self):
response = circuits.cir_RsRCRC(f_range[1], Rs=Resistance,
Rp1=Parallel_Resistance,
C1=Capacitance,
Rp2=Parallel_Resistance,
C2=Capacitance)
        assert Resistance > 0, \
            'The input series resistance is invalid'
        assert Parallel_Resistance > 0, \
            'The input parallel resistance is invalid'
        assert Capacitance > 0, \
            'The input capacitance is invalid'
assert len(response) == len(f_range[1]), \
'The returned response is not valid'
for item in response:
assert isinstance(item, complex), \
'The returned response includes invalid impedance'
real_Z = item.real
imag_Z = item.imag
total_Z = np.sqrt((real_Z**2) + (imag_Z**2))
            assert total_Z > Resistance, \
                'The impedance value returned is lower than the ' +\
                'solution resistance'
def test_randles(self):
response = circuits.cir_Randles_simplified(f_range[1],
Rs=Resistance,
Rp=Parallel_Resistance,
alpha=alpha,
sigma=sigma,
Q=Constant_phase_element)
        assert Resistance > 0, \
            'The input series resistance is invalid'
        assert Parallel_Resistance > 0, \
            'The input parallel resistance is invalid'
        assert 0 < alpha <= 1, \
            'The value of alpha is out of range'
        assert Constant_phase_element > 0, \
            'The input constant phase element is non-positive'
        assert sigma > 0, 'The input coefficient is non-positive'
assert len(response) == len(f_range[1]),\
'The returned response is not valid'
for item in response:
assert isinstance(item, complex), \
'The returned response includes invalid impedance'
real_Z = item.real
imag_Z = item.imag
total_Z = np.sqrt((real_Z**2) + (imag_Z**2))
            assert total_Z > Resistance, \
                'The impedance value returned is lower than the ' +\
                'solution resistance'
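

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original suite: every |Z| > Rs check
# above relies on the fact that a circuit with a series solution resistance
# Rs can never have an impedance magnitude sqrt(Re(Z)**2 + Im(Z)**2) below
# Rs. Assuming the standard series-Rs plus parallel-RC formula
# Z(w) = Rs + Rp / (1 + j*w*Rp*C), a minimal self-contained version of the
# check looks like this (function names and component values here are
# hypothetical, not taken from the circuits module):
def _example_rs_rc_impedance(freq_hz, Rs=100.0, Rp=500.0, C=1e-6):
    """Complex impedance of Rs in series with a parallel R-C branch."""
    omega = 2 * np.pi * np.asarray(freq_hz, dtype=float)  # angular frequency, rad/s
    return Rs + Rp / (1 + 1j * omega * Rp * C)


def _example_magnitude_check():
    z = _example_rs_rc_impedance(np.logspace(-1, 6, 50))
    # np.abs(z) equals np.sqrt(z.real**2 + z.imag**2) for complex arrays
    assert np.all(np.abs(z) > 100.0), \
        'The impedance magnitude should never drop below Rs'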
# position/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
# Diagrams here: https://docs.google.com/drawings/d/1DsPnl97GKe9f14h41RPeZDssDUztRETGkXGaolXCeyo/edit
from candidate.models import CandidateCampaign, CandidateCampaignManager, CandidateCampaignListManager
from ballot.controllers import figure_out_google_civic_election_id_voter_is_watching
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import models
from django.db.models import Q
from election.models import Election
from exception.models import handle_exception, handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception
from measure.models import ContestMeasure, ContestMeasureList, ContestMeasureManager
from office.models import ContestOfficeManager
from organization.models import Organization, OrganizationManager
from twitter.models import TwitterUser
from voter.models import fetch_voter_id_from_voter_we_vote_id, fetch_voter_we_vote_id_from_voter_id, Voter, VoterManager
from voter_guide.models import VoterGuideManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_last_position_integer, fetch_site_unique_id_prefix
ANY_STANCE = 'ANY_STANCE' # This is a way to indicate when we want to return any stance (support, oppose, no_stance)
SUPPORT = 'SUPPORT'
STILL_DECIDING = 'STILL_DECIDING'
NO_STANCE = 'NO_STANCE' # DALE 2016-8-29 We will want to deprecate NO_STANCE and replace with INFORMATION_ONLY
INFORMATION_ONLY = 'INFO_ONLY'
OPPOSE = 'OPPOSE'
PERCENT_RATING = 'PERCENT_RATING'
POSITION_CHOICES = (
# ('SUPPORT_STRONG', 'Strong Supports'), # I do not believe we will be offering 'SUPPORT_STRONG' as an option
(SUPPORT, 'Supports'),
(STILL_DECIDING, 'Still deciding'), # Still undecided
(NO_STANCE, 'No stance'), # We don't know the stance
(INFORMATION_ONLY, 'Information only'), # This entry is meant as food-for-thought and is not advocating
(OPPOSE, 'Opposes'),
(PERCENT_RATING, 'Percentage point rating'),
# ('OPPOSE_STRONG', 'Strongly Opposes'), # I do not believe we will be offering 'OPPOSE_STRONG' as an option
)
# friends_vs_public
FRIENDS_AND_PUBLIC = 'FRIENDS_AND_PUBLIC'
FRIENDS_ONLY = 'FRIENDS_ONLY'
PUBLIC_ONLY = 'PUBLIC_ONLY'
SHOW_PUBLIC = 'SHOW_PUBLIC'
logger = wevote_functions.admin.get_logger(__name__)
# TODO DALE Consider adding vote_smart_sig_id and vote_smart_candidate_id fields so we can export them and to prevent
# duplicate position entries from Vote Smart
class PositionEntered(models.Model):
"""
Any public position entered by any organization or candidate gets its own PositionEntered entry.
    NOTE: We also have a PositionForFriends table with exactly the same structure as PositionEntered; it holds
    the opinions that voters only want to share with friends.
"""
    # We are relying on the built-in Django id field
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our org info with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "pos", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_position_integer
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
# The id for the generated position that this PositionEntered entry influences
position_id = models.BigIntegerField(null=True, blank=True) # NOT USED CURRENTLY
test = models.BigIntegerField(null=True, blank=True)
ballot_item_display_name = models.CharField(verbose_name="text name for ballot item",
max_length=255, null=True, blank=True)
# We cache the url to an image for the candidate, measure or office for rapid display
ballot_item_image_url_https = models.URLField(verbose_name='url of https image for candidate, measure or office',
blank=True, null=True)
ballot_item_twitter_handle = models.CharField(verbose_name='twitter screen_name for candidate, measure, or office',
max_length=255, null=True, unique=False)
# What is the organization name, voter name, or public figure name? We cache this here for rapid display
speaker_display_name = models.CharField(
verbose_name="name of the org or person with position", max_length=255, null=True, blank=True, unique=False)
# We cache the url to an image for the org, voter, or public_figure for rapid display
speaker_image_url_https = models.URLField(verbose_name='url of https image for org or person with position',
blank=True, null=True)
speaker_twitter_handle = models.CharField(verbose_name='twitter screen_name for org or person with position',
max_length=255, null=True, unique=False)
date_entered = models.DateTimeField(verbose_name='date entered', null=True, auto_now=True)
    # The date this position last changed
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
# The organization this position is for
organization_id = models.BigIntegerField(null=True, blank=True)
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the organization", max_length=255, null=True,
blank=True, unique=False)
# The voter expressing the opinion
# Note that for organizations who have friends, the voter_we_vote_id is what we use to link to the friends
# (in the PositionForFriends table).
# Public positions from an organization are shared via organization_we_vote_id (in PositionEntered table), while
    # friends-only positions are shared via voter_we_vote_id.
voter_id = models.BigIntegerField(null=True, blank=True)
voter_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the voter expressing the opinion", max_length=255, null=True,
blank=True, unique=False)
# The unique id of the public figure expressing the opinion. May be null if position is from org or voter
# instead of public figure.
public_figure_we_vote_id = models.CharField(
verbose_name="public figure we vote id", max_length=255, null=True, blank=True, unique=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(verbose_name="google civic election id",
max_length=255, null=True, blank=False, default=0)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="us state of the ballot item position is for",
max_length=2, null=True, blank=True)
# ### Values from Vote Smart ###
vote_smart_rating_id = models.BigIntegerField(null=True, blank=True, unique=False)
    # Usually in one of these two formats: 2015 or 2014-2015
vote_smart_time_span = models.CharField(
verbose_name="the period in which the organization stated this position", max_length=255, null=True,
blank=True, unique=False)
vote_smart_rating = models.CharField(
verbose_name="vote smart value between 0-100", max_length=255, null=True,
blank=True, unique=False)
vote_smart_rating_name = models.CharField(max_length=255, null=True, blank=True, unique=False)
# The unique We Vote id of the tweet that is the source of the position
tweet_source_id = models.BigIntegerField(null=True, blank=True)
# This is the voter / authenticated user who entered the position for an organization
# (NOT the voter expressing opinion)
voter_entering_position = models.ForeignKey(
Voter, verbose_name='authenticated user who entered position', null=True, blank=True)
# The Twitter user account that generated this position
twitter_user_entered_position = models.ForeignKey(TwitterUser, null=True, verbose_name='')
# This is the office that the position refers to.
    # Either contest_office, candidate_campaign OR contest_measure is filled, but not all three
contest_office_id = models.BigIntegerField(verbose_name='id of contest_office', null=True, blank=True)
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_office", max_length=255, null=True, blank=True, unique=False)
contest_office_name = models.CharField(verbose_name="name of the office", max_length=255, null=True, blank=True)
# This is the candidate/politician that the position refers to.
    # Either candidate_campaign, contest_office OR contest_measure is filled, but not all three
candidate_campaign_id = models.BigIntegerField(verbose_name='id of candidate_campaign', null=True, blank=True)
candidate_campaign_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the candidate_campaign", max_length=255, null=True,
blank=True, unique=False)
# The candidate's name as passed over by Google Civic. We save this so we can match to this candidate if an import
# doesn't include a we_vote_id we recognize.
google_civic_candidate_name = models.CharField(verbose_name="candidate name exactly as received from google civic",
max_length=255, null=True, blank=True)
# The measure's title as passed over by Google Civic. We save this so we can match to this measure if an import
# doesn't include a we_vote_id we recognize.
google_civic_measure_title = models.CharField(verbose_name="measure title exactly as received from google civic",
max_length=255, null=True, blank=True)
# Useful for queries based on Politicians -- not the main table we use for ballot display though
politician_id = models.BigIntegerField(verbose_name='', null=True, blank=True)
politician_we_vote_id = models.CharField(
verbose_name="we vote permanent id for politician", max_length=255, null=True,
blank=True, unique=False)
political_party = models.CharField(verbose_name="political party", max_length=255, null=True)
# This is the measure/initiative/proposition that the position refers to.
    # Either contest_measure, contest_office OR candidate_campaign is filled, but not all three
contest_measure_id = models.BigIntegerField(verbose_name='id of contest_measure', null=True, blank=True)
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_measure", max_length=255, null=True,
blank=True, unique=False)
# Strategic denormalization - this is redundant but will make generating the voter guide easier.
# geo = models.ForeignKey(Geo, null=True, related_name='pos_geo')
# issue = models.ForeignKey(Issue, null=True, blank=True, related_name='')
stance = models.CharField(max_length=15, choices=POSITION_CHOICES, default=NO_STANCE) # supporting/opposing
statement_text = models.TextField(null=True, blank=True,)
statement_html = models.TextField(null=True, blank=True,)
# A link to any location with more information about this position
more_info_url = models.URLField(blank=True, null=True, verbose_name='url with more info about this position')
# Did this position come from a web scraper?
from_scraper = models.BooleanField(default=False)
# Was this position certified by an official with the organization?
organization_certified = models.BooleanField(default=False)
# Was this position certified by an official We Vote volunteer?
volunteer_certified = models.BooleanField(default=False)
# link = models.URLField(null=True, blank=True,)
# link_title = models.TextField(null=True, blank=True, max_length=128)
# link_site = models.TextField(null=True, blank=True, max_length=64)
# link_txt = models.TextField(null=True, blank=True)
# link_img = models.URLField(null=True, blank=True)
# Set this to True after getting all the link details (title, txt, img etc)
# details_loaded = models.BooleanField(default=False)
# video_embed = models.URLField(null=True, blank=True)
# spam_flag = models.BooleanField(default=False)
# abuse_flag = models.BooleanField(default=False)
# orig_json = models.TextField(blank=True)
def __unicode__(self):
return self.stance
class Meta:
ordering = ('date_entered',)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this organization came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_last_position_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "pos" = tells us this is a unique id for an pos
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}pos{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
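            # For example, with site_unique_id_prefix "3v" and next integer 123,
            # the generated id would look like "wv3vpos123" (values here are
            # purely illustrative).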
super(PositionEntered, self).save(*args, **kwargs)
    # Is the position an actual endorsement?
def is_support(self):
if self.stance == SUPPORT:
return True
return False
# Is the position a rating that is 66% or greater?
def is_positive_rating(self):
if self.stance == PERCENT_RATING:
rating_percentage = convert_to_int(self.vote_smart_rating)
if rating_percentage >= 66:
return True
return False
    # Is the position an actual endorsement or a rating that is 66% or greater?
def is_support_or_positive_rating(self):
if self.is_support():
return True
elif self.is_positive_rating():
return True
return False
# Is the position an anti-endorsement?
def is_oppose(self):
if self.stance == OPPOSE:
return True
return False
# Is the position a rating that is 33% or less?
def is_negative_rating(self):
if self.stance == PERCENT_RATING:
rating_percentage = convert_to_int(self.vote_smart_rating)
if rating_percentage <= 33:
return True
return False
    # Is the position an anti-endorsement or a rating that is 33% or less?
def is_oppose_or_negative_rating(self):
if self.is_oppose():
return True
elif self.is_negative_rating():
return True
return False
def is_no_stance(self):
"""
"is_no_stance" means that they may have a position, but we don't know the stance -- its not in the database
DALE 2016-8-29 We will want to deprecate NO_STANCE and replace with INFORMATION_ONLY
:return:
"""
if self.stance == NO_STANCE:
return True
elif self.stance == PERCENT_RATING:
rating_percentage = convert_to_int(self.vote_smart_rating)
if (rating_percentage > 33) and (rating_percentage < 66):
return True
return False
def is_information_only(self):
"""
"information_only" means that they are not taking a support/oppose position
:return:
"""
if self.stance == INFORMATION_ONLY:
return True
if positive_value_exists(self.statement_text) or positive_value_exists(self.statement_html):
# If there is a text description, and no SUPPORT or OPPOSE, then it is INFORMATION_ONLY
if self.stance != OPPOSE and self.stance != SUPPORT and self.stance != STILL_DECIDING \
and self.stance != PERCENT_RATING:
return True
return False
def is_still_deciding(self):
if self.stance == STILL_DECIDING:
return True
return False
def last_updated(self):
if positive_value_exists(self.date_last_changed):
return str(self.date_last_changed)
elif positive_value_exists(self.date_entered):
return str(self.date_entered)
return ''
def candidate_campaign(self):
if not self.candidate_campaign_id:
return
try:
candidate_campaign = CandidateCampaign.objects.get(id=self.candidate_campaign_id)
except CandidateCampaign.MultipleObjectsReturned as e:
logger.error("position.candidate_campaign Found multiple")
return
except CandidateCampaign.DoesNotExist:
return
return candidate_campaign
def contest_measure(self):
if not self.contest_measure_id:
return
try:
contest_measure = ContestMeasure.objects.get(id=self.contest_measure_id)
except ContestMeasure.MultipleObjectsReturned as e:
logger.error("position.contest_measure Found multiple")
return
except ContestMeasure.DoesNotExist:
return
return contest_measure
def election(self):
if not self.google_civic_election_id:
return
try:
election = Election.objects.get(google_civic_election_id=self.google_civic_election_id)
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
logger.error("position.election Found multiple")
return
except Election.DoesNotExist:
return
return election
def organization(self):
if not self.organization_id:
return
try:
organization = Organization.objects.get(id=self.organization_id)
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
logger.error("position.organization Found multiple")
return
except Organization.DoesNotExist:
return
return organization
class PositionForFriends(models.Model):
"""
Any position intended for friends only that is entered by any organization or candidate gets its own
PositionForFriends entry.
    NOTE: We also have a PositionEntered table with exactly the same structure as PositionForFriends; it holds
    the public positions, while this table holds opinions that voters only want to share with friends.
"""
    # We are relying on the built-in Django id field
# The we_vote_id identifier is unique across all We Vote sites, and allows us to share our org info with other
# organizations
# It starts with "wv" then we add on a database specific identifier like "3v" (WeVoteSetting.site_unique_id_prefix)
# then the string "pos", and then a sequential integer like "123".
# We keep the last value in WeVoteSetting.we_vote_id_last_position_integer
we_vote_id = models.CharField(
verbose_name="we vote permanent id", max_length=255, default=None, null=True, blank=True, unique=True)
# The id for the generated position that this PositionForFriends entry influences
position_id = models.BigIntegerField(null=True, blank=True) # NOT USED CURRENTLY
test = models.BigIntegerField(null=True, blank=True)
ballot_item_display_name = models.CharField(verbose_name="text name for ballot item",
max_length=255, null=True, blank=True)
# We cache the url to an image for the candidate, measure or office for rapid display
ballot_item_image_url_https = models.URLField(
verbose_name='url of https image for candidate, measure or office',
blank=True, null=True)
ballot_item_twitter_handle = models.CharField(
verbose_name='twitter screen_name for candidate, measure, or office',
max_length=255, null=True, unique=False)
# What is the organization name, voter name, or public figure name? We cache this here for rapid display
speaker_display_name = models.CharField(
verbose_name="name of the org or person with position", max_length=255, null=True, blank=True, unique=False)
# We cache the url to an image for the org, voter, or public_figure for rapid display
speaker_image_url_https = models.URLField(verbose_name='url of https image for org or person with position',
blank=True, null=True)
speaker_twitter_handle = models.CharField(verbose_name='twitter screen_name for org or person with position',
max_length=255, null=True, unique=False)
date_entered = models.DateTimeField(verbose_name='date entered', null=True, auto_now=True)
    # The date this position last changed
date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)
# The organization this position is for
organization_id = models.BigIntegerField(null=True, blank=True)
organization_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the organization", max_length=255, null=True,
blank=True, unique=False)
# The voter expressing the opinion
# Note that for organizations who have friends, the voter_we_vote_id is what we use to link to the friends.
# Public positions from an organization are shared via organization_we_vote_id (in PositionEntered table), while
    # friends-only positions are shared via voter_we_vote_id.
voter_id = models.BigIntegerField(null=True, blank=True)
voter_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the voter expressing the opinion", max_length=255, null=True,
blank=True, unique=False)
# The unique id of the public figure expressing the opinion. May be null if position is from org or voter
# instead of public figure.
public_figure_we_vote_id = models.CharField(
verbose_name="public figure we vote id", max_length=255, null=True, blank=True, unique=False)
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
# State code
state_code = models.CharField(verbose_name="us state of the ballot item position is for", max_length=2,
null=True, blank=True)
# ### Values from Vote Smart ###
vote_smart_rating_id = models.BigIntegerField(null=True, blank=True, unique=False)
    # Usually in one of these two formats: 2015 or 2014-2015
vote_smart_time_span = models.CharField(
verbose_name="the period in which the organization stated this position", max_length=255, null=True,
blank=True, unique=False)
vote_smart_rating = models.CharField(
verbose_name="vote smart value between 0-100", max_length=255, null=True,
blank=True, unique=False)
vote_smart_rating_name = models.CharField(max_length=255, null=True, blank=True, unique=False)
# The unique We Vote id of the tweet that is the source of the position
tweet_source_id = models.BigIntegerField(null=True, blank=True)
# This is the voter / authenticated user who entered the position for an organization
# (NOT the voter expressing opinion)
voter_entering_position = models.ForeignKey(
Voter, verbose_name='authenticated user who entered position', null=True, blank=True)
# The Twitter user account that generated this position
twitter_user_entered_position = models.ForeignKey(TwitterUser, null=True, verbose_name='')
# This is the office that the position refers to.
    # Either contest_office, candidate_campaign OR contest_measure is filled, but not all three
contest_office_id = models.BigIntegerField(verbose_name='id of contest_office', null=True, blank=True)
contest_office_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_office", max_length=255, null=True, blank=True,
unique=False)
contest_office_name = models.CharField(verbose_name="name of the office", max_length=255, null=True, blank=True)
# This is the candidate/politician that the position refers to.
    # Either candidate_campaign, contest_office OR contest_measure is filled, but not all three
candidate_campaign_id = models.BigIntegerField(verbose_name='id of candidate_campaign', null=True, blank=True)
candidate_campaign_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the candidate_campaign", max_length=255, null=True,
blank=True, unique=False)
# The candidate's name as passed over by Google Civic. We save this so we can match to this candidate if an import
# doesn't include a we_vote_id we recognize.
google_civic_candidate_name = models.CharField(
verbose_name="candidate name exactly as received from google civic",
max_length=255, null=True, blank=True)
# Useful for queries based on Politicians -- not the main table we use for ballot display though
politician_id = models.BigIntegerField(verbose_name='', null=True, blank=True)
politician_we_vote_id = models.CharField(
verbose_name="we vote permanent id for politician", max_length=255, null=True,
blank=True, unique=False)
political_party = models.CharField(verbose_name="candidate political party", max_length=255, null=True)
# This is the measure/initiative/proposition that the position refers to.
    # Either contest_measure, contest_office OR candidate_campaign is filled, but not all three
contest_measure_id = models.BigIntegerField(verbose_name='id of contest_measure', null=True, blank=True)
contest_measure_we_vote_id = models.CharField(
verbose_name="we vote permanent id for the contest_measure", max_length=255, null=True,
blank=True, unique=False)
# The measure's title as passed over by Google Civic. We save this so we can match to this measure if an import
# doesn't include a we_vote_id we recognize.
google_civic_measure_title = models.CharField(verbose_name="measure title exactly as received from google civic",
max_length=255, null=True, blank=True)
# Strategic denormalization - this is redundant but will make generating the voter guide easier.
# geo = models.ForeignKey(Geo, null=True, related_name='pos_geo')
# issue = models.ForeignKey(Issue, null=True, blank=True, related_name='')
stance = models.CharField(max_length=15, choices=POSITION_CHOICES, default=NO_STANCE) # supporting/opposing
statement_text = models.TextField(null=True, blank=True, )
statement_html = models.TextField(null=True, blank=True, )
# A link to any location with more information about this position
more_info_url = models.URLField(blank=True, null=True, verbose_name='url with more info about this position')
# Did this position come from a web scraper?
from_scraper = models.BooleanField(default=False)
# Was this position certified by an official with the organization?
organization_certified = models.BooleanField(default=False)
# Was this position certified by an official We Vote volunteer?
volunteer_certified = models.BooleanField(default=False)
# link = models.URLField(null=True, blank=True,)
# link_title = models.TextField(null=True, blank=True, max_length=128)
# link_site = models.TextField(null=True, blank=True, max_length=64)
# link_txt = models.TextField(null=True, blank=True)
# link_img = models.URLField(null=True, blank=True)
# Set this to True after getting all the link details (title, txt, img etc)
# details_loaded = models.BooleanField(default=False)
# video_embed = models.URLField(null=True, blank=True)
# spam_flag = models.BooleanField(default=False)
# abuse_flag = models.BooleanField(default=False)
# orig_json = models.TextField(blank=True)
def __unicode__(self):
return self.stance
class Meta:
ordering = ('date_entered',)
# We override the save function so we can auto-generate we_vote_id
def save(self, *args, **kwargs):
# Even if this organization came from another source we still need a unique we_vote_id
if self.we_vote_id:
self.we_vote_id = self.we_vote_id.strip().lower()
if self.we_vote_id == "" or self.we_vote_id is None: # If there isn't a value...
# ...generate a new id
site_unique_id_prefix = fetch_site_unique_id_prefix()
next_local_integer = fetch_next_we_vote_id_last_position_integer()
# "wv" = We Vote
# site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
# "pos" = tells us this is a unique id for an pos
# next_integer = a unique, sequential integer for this server - not necessarily tied to database id
self.we_vote_id = "wv{site_unique_id_prefix}pos{next_integer}".format(
site_unique_id_prefix=site_unique_id_prefix,
next_integer=next_local_integer,
)
super(PositionForFriends, self).save(*args, **kwargs)
    # Is the position an actual endorsement?
def is_support(self):
if self.stance == SUPPORT:
return True
return False
# Is the position a rating that is 66% or greater?
def is_positive_rating(self):
if self.stance == PERCENT_RATING:
rating_percentage = convert_to_int(self.vote_smart_rating)
if rating_percentage >= 66:
return True
return False
    # Is the position an actual endorsement or a rating that is 66% or greater?
def is_support_or_positive_rating(self):
if self.is_support():
return True
elif self.is_positive_rating():
return True
return False
# Is the position an anti-endorsement?
def is_oppose(self):
if self.stance == OPPOSE:
return True
return False
# Is the position a rating that is 33% or less?
def is_negative_rating(self):
if self.stance == PERCENT_RATING:
rating_percentage = convert_to_int(self.vote_smart_rating)
if rating_percentage <= 33:
return True
return False
    # Is the position an anti-endorsement or a rating that is 33% or less?
def is_oppose_or_negative_rating(self):
if self.is_oppose():
return True
elif self.is_negative_rating():
return True
return False
def is_no_stance(self):
"""
"is_no_stance" means that they may have a position, but we don't know the stance -- its not in the database
DALE 2016-8-29 We will want to deprecate NO_STANCE and replace with INFORMATION_ONLY
:return:
"""
if self.stance == NO_STANCE:
return True
elif self.stance == PERCENT_RATING:
rating_percentage = convert_to_int(self.vote_smart_rating)
if (rating_percentage > 33) and (rating_percentage < 66):
return True
return False
def is_information_only(self):
"""
"information_only" means that they are not taking a support/oppose position
:return:
"""
if self.stance == INFORMATION_ONLY:
return True
if positive_value_exists(self.statement_text) or positive_value_exists(self.statement_html):
# If there is a text description, and no SUPPORT or OPPOSE, then it is INFORMATION_ONLY
if self.stance != OPPOSE and self.stance != SUPPORT and self.stance != STILL_DECIDING \
and self.stance != PERCENT_RATING:
return True
return False
def is_still_deciding(self):
if self.stance == STILL_DECIDING:
return True
return False
def last_updated(self):
if positive_value_exists(self.date_last_changed):
return str(self.date_last_changed)
elif positive_value_exists(self.date_entered):
return str(self.date_entered)
return ''
def candidate_campaign(self):
if not self.candidate_campaign_id:
return
try:
candidate_campaign = CandidateCampaign.objects.get(id=self.candidate_campaign_id)
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
logger.error("position.candidate_campaign Found multiple")
return
except CandidateCampaign.DoesNotExist:
return
return candidate_campaign
def election(self):
if not self.google_civic_election_id:
return
try:
election = Election.objects.get(google_civic_election_id=self.google_civic_election_id)
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
logger.error("position.election Found multiple")
return
except Election.DoesNotExist:
return
return election
def organization(self):
if not self.organization_id:
return
try:
organization = Organization.objects.get(id=self.organization_id)
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
logger.error("position.organization Found multiple")
return
except Organization.DoesNotExist:
return
return organization
# NOTE: 2015-11 We are still using PositionEntered and PositionForFriends instead of Position
class Position(models.Model):
"""
    This is a table of data generated from PositionEntered. Not all fields are copied over from PositionEntered.
"""
    # We are relying on the built-in Django id field
# The PositionEntered entry that was copied into this entry based on verification rules
position_entered_id = models.BigIntegerField(null=True, blank=True)
date_entered = models.DateTimeField(verbose_name='date entered', null=True, auto_now=True)
# The organization this position is for
organization_id = models.BigIntegerField(null=True, blank=True)
# The election this position is for
# election_id = models.BigIntegerField(verbose_name='election id', null=True, blank=True) # DEPRECATED
# The unique ID of the election containing this contest. (Provided by Google Civic)
google_civic_election_id = models.CharField(
verbose_name="google civic election id", max_length=255, null=True, blank=True)
google_civic_election_id_new = models.PositiveIntegerField(
verbose_name="google civic election id", default=0, null=True, blank=True)
candidate_campaign = models.ForeignKey(
CandidateCampaign, verbose_name='candidate campaign', null=True, blank=True, related_name='position_candidate')
# Useful for queries based on Politicians -- not the main table we use for ballot display though
politician_id = models.BigIntegerField(verbose_name='', null=True, blank=True)
# This is the measure/initiative/proposition that the position refers to.
# Either measure_campaign is filled OR candidate_campaign, but not both
measure_campaign = models.ForeignKey(
ContestMeasure, verbose_name='measure campaign', null=True, blank=True, related_name='position_measure')
stance = models.CharField(max_length=15, choices=POSITION_CHOICES) # supporting/opposing
statement_text = models.TextField(null=True, blank=True,)
statement_html = models.TextField(null=True, blank=True,)
# A link to any location with more information about this position
more_info_url = models.URLField(blank=True, null=True, verbose_name='url with more info about this position')
def __unicode__(self):
        return self.stance  # Position has no name field; mirror the other position models
class Meta:
ordering = ('date_entered',)
# def display_ballot_item_name(self):
# """
# Organization supports 'ballot_item_name' (which could be a campaign name, or measure name
# :return:
# """
# # Try to retrieve the candidate_campaign
# if candidate_campaign.id:
class PositionListManager(models.Model):
def add_is_public_position(self, incoming_position_list, is_public_position):
outgoing_position_list = []
for one_position in incoming_position_list:
one_position.is_public_position = is_public_position
outgoing_position_list.append(one_position)
return outgoing_position_list
def calculate_positions_followed_by_voter(
self, voter_id, all_positions_list, organizations_followed_by_voter):
"""
We need a list of positions that were made by an organization, public figure or friend that this voter follows
:param voter_id:
:param all_positions_list:
:param organizations_followed_by_voter:
:return:
"""
positions_followed_by_voter = []
# Only return the positions if they are from organizations the voter follows
for position in all_positions_list:
if position.voter_id == voter_id: # We include the voter currently viewing the ballot in this list
positions_followed_by_voter.append(position)
# TODO Include a check against a list of "people_followed_by_voter" so we can include friends
elif position.organization_id in organizations_followed_by_voter:
positions_followed_by_voter.append(position)
return positions_followed_by_voter
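
    # Illustrative behavior (values hypothetical): given positions from
    # organizations 7 and 9 and organizations_followed_by_voter == [7], only
    # the positions from organization 7, plus any entered by the viewing
    # voter, are returned.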
def calculate_positions_not_followed_by_voter(
self, all_positions_list, organizations_followed_by_voter):
"""
We need a list of positions that were NOT made by an organization, public figure or friend
that this voter follows
:param all_positions_list:
:param organizations_followed_by_voter:
:return:
"""
positions_not_followed_by_voter = []
# Only return the positions if they are from organizations the voter follows
for position in all_positions_list:
# Some positions are for individual voters, so we want to filter those out
if position.organization_id \
and position.organization_id not in organizations_followed_by_voter:
positions_not_followed_by_voter.append(position)
return positions_not_followed_by_voter
def fetch_positions_count_for_voter_guide(self, organization_we_vote_id, google_civic_election_id,
retrieve_public_positions=True, stance_we_are_looking_for=ANY_STANCE):
# Don't proceed unless we have a correct stance identifier
        if stance_we_are_looking_for not in \
                (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING):
return 0
# Note that one of the incoming options for stance_we_are_looking_for is 'ANY_STANCE'
# which means we want to return all stances
# Don't proceed unless we have organization identifier and the election we care about
if not positive_value_exists(organization_we_vote_id) and not \
positive_value_exists(google_civic_election_id):
return 0
position_count = 0
try:
if retrieve_public_positions:
position_on_stage_starter = PositionEntered
position_on_stage = PositionEntered()
else:
position_on_stage_starter = PositionForFriends
position_on_stage = PositionForFriends()
position_list = position_on_stage_starter.objects.order_by('date_entered')
position_list = position_list.filter(organization_we_vote_id=organization_we_vote_id)
position_list = position_list.filter(google_civic_election_id=google_civic_election_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY_STANCE" it means we want to not filter down the list
if stance_we_are_looking_for == SUPPORT:
position_list = position_list.filter(
Q(stance=stance_we_are_looking_for) | # Matches "is_support"
(Q(stance=PERCENT_RATING) & Q(vote_smart_rating__gte=66)) # Matches "is_positive_rating"
) # | Q(stance=GRADE_RATING))
elif stance_we_are_looking_for == OPPOSE:
position_list = position_list.filter(
Q(stance=stance_we_are_looking_for) | # Matches "is_oppose"
(Q(stance=PERCENT_RATING) & Q(vote_smart_rating__lte=33)) # Matches "is_negative_rating"
) # | Q(stance=GRADE_RATING))
else:
position_list = position_list.filter(stance=stance_we_are_looking_for)
# Limit to positions in the last x years - currently we are not limiting
# position_list = position_list.filter(election_id=election_id)
position_count = position_list.count()
        except Exception:
            # If anything goes wrong with the query, fall through and report zero positions
            pass
return position_count
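
    # Illustrative usage (identifiers and values here are hypothetical):
    #   position_list_manager = PositionListManager()
    #   support_count = position_list_manager.fetch_positions_count_for_voter_guide(
    #       'wv02org1234', '4162', retrieve_public_positions=True,
    #       stance_we_are_looking_for=SUPPORT)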
def remove_positions_ignored_by_voter(
self, positions_list, organizations_ignored_by_voter):
"""
We need a list of positions that were NOT made by an organization, public figure or friend
that this voter follows
:param positions_list:
:param organizations_ignored_by_voter:
:return:
"""
positions_ignored_by_voter = []
# Only return the positions if they are from organizations the voter follows
for position in positions_list:
# Some positions are for individual voters, so we want to filter those out
if position.organization_id \
and position.organization_id not in organizations_ignored_by_voter:
positions_ignored_by_voter.append(position)
return positions_ignored_by_voter
def retrieve_all_positions_for_candidate_campaign(self, retrieve_public_positions,
candidate_campaign_id, candidate_campaign_we_vote_id='',
stance_we_are_looking_for=ANY_STANCE, most_recent_only=True,
friends_we_vote_id_list=False):
"""
        We do not attempt to retrieve public positions and friends-only positions in the same call.
:param retrieve_public_positions:
:param candidate_campaign_id:
:param candidate_campaign_we_vote_id:
:param stance_we_are_looking_for:
:param most_recent_only:
:param friends_we_vote_id_list: If this comes in as a list, use that list. If it comes in as False,
we can consider looking up the values if they are needed, but we will then need voter_device_id passed in too.
:return:
"""
        if stance_we_are_looking_for not in \
                (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING):
position_list = []
return position_list
# Note that one of the incoming options for stance_we_are_looking_for is 'ANY_STANCE'
# which means we want to return all stances
if not positive_value_exists(candidate_campaign_id) and not \
positive_value_exists(candidate_campaign_we_vote_id):
position_list = []
return position_list
# If retrieving PositionForFriends, make sure we have the necessary variables
if not retrieve_public_positions:
if not friends_we_vote_id_list:
position_list = []
return position_list
elif type(friends_we_vote_id_list) is list and len(friends_we_vote_id_list) == 0:
position_list = []
return position_list
# Retrieve the support positions for this candidate_campaign_id
position_list = []
position_list_found = False
try:
if retrieve_public_positions:
position_list = PositionEntered.objects.order_by('date_entered')
retrieve_friends_positions = False
else:
position_list = PositionForFriends.objects.order_by('date_entered')
retrieve_friends_positions = True
if positive_value_exists(candidate_campaign_id):
position_list = position_list.filter(candidate_campaign_id=candidate_campaign_id)
else:
position_list = position_list.filter(candidate_campaign_we_vote_id=candidate_campaign_we_vote_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY_STANCE" it means we want to not filter down the list
if stance_we_are_looking_for == SUPPORT or stance_we_are_looking_for == OPPOSE:
position_list = position_list.filter(
Q(stance=stance_we_are_looking_for) | Q(stance=PERCENT_RATING)) # | Q(stance=GRADE_RATING))
else:
position_list = position_list.filter(stance=stance_we_are_looking_for)
if retrieve_friends_positions and friends_we_vote_id_list is not False:
# Find positions from friends. Look for we_vote_id case insensitive.
we_vote_id_filter = Q()
for we_vote_id in friends_we_vote_id_list:
we_vote_id_filter |= Q(voter_we_vote_id__iexact=we_vote_id)
position_list = position_list.filter(we_vote_id_filter)
# Limit to positions in the last x years - currently we are not limiting
# position_list = position_list.filter(election_id=election_id)
# Now filter out the positions that have a percent rating that doesn't match the stance_we_are_looking_for
if stance_we_are_looking_for == SUPPORT or stance_we_are_looking_for == OPPOSE:
revised_position_list = []
for one_position in position_list:
if stance_we_are_looking_for == SUPPORT:
if one_position.stance == PERCENT_RATING:
if one_position.is_positive_rating(): # This was "is_support"
revised_position_list.append(one_position)
else:
revised_position_list.append(one_position)
elif stance_we_are_looking_for == OPPOSE:
if one_position.stance == PERCENT_RATING:
if one_position.is_negative_rating(): # This was "is_oppose"
revised_position_list.append(one_position)
else:
revised_position_list.append(one_position)
position_list = revised_position_list
if len(position_list):
position_list_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
# If we have multiple positions for one org, we only want to show the most recent.
if most_recent_only:
if position_list_found:
position_list_filtered = self.remove_older_positions_for_each_org(position_list)
else:
position_list_filtered = []
else:
position_list_filtered = position_list
        if not position_list_found:
            position_list_filtered = []
        return position_list_filtered
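
    # Illustrative call (identifiers hypothetical): the most recent public
    # SUPPORT position per organization for one candidate:
    #   positions = position_list_manager.retrieve_all_positions_for_candidate_campaign(
    #       True, 0, candidate_campaign_we_vote_id='wv02cand5678',
    #       stance_we_are_looking_for=SUPPORT)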
def retrieve_all_positions_for_contest_measure(self, retrieve_public_positions,
contest_measure_id, contest_measure_we_vote_id,
stance_we_are_looking_for,
most_recent_only=True, friends_we_vote_id_list=False):
        if stance_we_are_looking_for not in \
                (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING):
position_list = []
return position_list
# Note that one of the incoming options for stance_we_are_looking_for is 'ANY' which means we want to return
# all stances
if not positive_value_exists(contest_measure_id) and not \
positive_value_exists(contest_measure_we_vote_id):
position_list = []
return position_list
# If retrieving PositionForFriends, make sure we have the necessary variables
if not retrieve_public_positions:
if not friends_we_vote_id_list:
position_list = []
return position_list
elif type(friends_we_vote_id_list) is list and len(friends_we_vote_id_list) == 0:
position_list = []
return position_list
# Retrieve the support positions for this contest_measure_id
position_list = []
position_list_found = False
try:
if retrieve_public_positions:
position_list = PositionEntered.objects.order_by('date_entered')
retrieve_friends_positions = False
else:
position_list = PositionForFriends.objects.order_by('date_entered')
retrieve_friends_positions = True
if positive_value_exists(contest_measure_id):
position_list = position_list.filter(contest_measure_id=contest_measure_id)
else:
position_list = position_list.filter(contest_measure_we_vote_id=contest_measure_we_vote_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY" it means we want to not filter down the list
position_list = position_list.filter(stance=stance_we_are_looking_for)
# NOTE: We don't have a special case for
# "if stance_we_are_looking_for == SUPPORT or stance_we_are_looking_for == OPPOSE"
# for contest_measure (like we do for candidate_campaign) because we don't have to deal with
# PERCENT_RATING data with measures
if retrieve_friends_positions and friends_we_vote_id_list is not False:
# Find positions from friends. Look for we_vote_id case insensitive.
we_vote_id_filter = Q()
for we_vote_id in friends_we_vote_id_list:
we_vote_id_filter |= Q(voter_we_vote_id__iexact=we_vote_id)
position_list = position_list.filter(we_vote_id_filter)
# Limit to positions in the last x years - currently we are not limiting
# position_list = position_list.filter(election_id=election_id)
# We don't need to filter out the positions that have a percent rating that doesn't match
# the stance_we_are_looking_for (like we do for candidates)
if len(position_list):
position_list_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
# If we have multiple positions for one org, we only want to show the most recent.
if most_recent_only:
if position_list_found:
position_list_filtered = self.remove_older_positions_for_each_org(position_list)
else:
position_list_filtered = []
else:
position_list_filtered = position_list
        if not position_list_found:
            position_list_filtered = []
        return position_list_filtered
def retrieve_all_positions_for_organization(self, organization_id, organization_we_vote_id,
stance_we_are_looking_for, friends_vs_public,
filter_for_voter, filter_out_voter, voter_device_id,
google_civic_election_id, state_code):
"""
Return a position list with all of the organization's positions.
Incoming filters include: stance_we_are_looking_for, friends_vs_public, filter_for_voter, filter_out_voter,
google_civic_election_id, state_code
:param organization_id:
:param organization_we_vote_id:
:param stance_we_are_looking_for:
:param friends_vs_public:
:param filter_for_voter: Show the positions relevant to the election the voter is currently looking at
:param filter_out_voter: Show positions for all elections the voter is NOT looking at
:param voter_device_id:
:param google_civic_election_id:
:param state_code:
:return:
"""
        if stance_we_are_looking_for not in \
                (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING):
position_list = []
return position_list
# Note that one of the incoming options for stance_we_are_looking_for is 'ANY_STANCE'
# which means we want to return all stances
if not positive_value_exists(organization_id) and not \
positive_value_exists(organization_we_vote_id):
position_list = []
return position_list
retrieve_friends_positions = friends_vs_public in (FRIENDS_ONLY, FRIENDS_AND_PUBLIC)
retrieve_public_positions = friends_vs_public in (PUBLIC_ONLY, FRIENDS_AND_PUBLIC)
# Retrieve public positions for this organization
public_positions_list = []
friends_positions_list = []
position_list_found = False
if retrieve_public_positions:
try:
public_positions_list = PositionEntered.objects.order_by('-vote_smart_time_span',
'-google_civic_election_id')
if positive_value_exists(organization_id):
public_positions_list = public_positions_list.filter(organization_id=organization_id)
else:
public_positions_list = public_positions_list.filter(
organization_we_vote_id=organization_we_vote_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY_STANCE" it means we want to not filter down the list
if stance_we_are_looking_for == SUPPORT or stance_we_are_looking_for == OPPOSE:
public_positions_list = public_positions_list.filter(
Q(stance=stance_we_are_looking_for) | Q(stance=PERCENT_RATING)) # | Q(stance=GRADE_RATING))
else:
public_positions_list = public_positions_list.filter(stance=stance_we_are_looking_for)
# Gather the ids for all positions in this election
public_only = True
we_vote_ids_for_all_positions_for_this_election = []
google_civic_election_id_local_scope = 0
if positive_value_exists(filter_for_voter) or positive_value_exists(filter_out_voter):
results = figure_out_google_civic_election_id_voter_is_watching(voter_device_id)
google_civic_election_id_local_scope = results['google_civic_election_id']
if positive_value_exists(google_civic_election_id_local_scope):
all_positions_for_this_election = self.retrieve_all_positions_for_election(
google_civic_election_id_local_scope, stance_we_are_looking_for, public_only)
for one_position in all_positions_for_this_election:
we_vote_ids_for_all_positions_for_this_election.append(one_position.we_vote_id)
# We can filter by only one of these
if positive_value_exists(filter_for_voter): # This is the default option
if positive_value_exists(google_civic_election_id_local_scope):
# Limit positions we can retrieve for an org to only the items in this election
public_positions_list = public_positions_list.filter(
we_vote_id__in=we_vote_ids_for_all_positions_for_this_election)
else:
# If no election is found for the voter, don't show any positions
public_positions_list = []
elif positive_value_exists(filter_out_voter):
if positive_value_exists(google_civic_election_id_local_scope):
# Limit positions we can retrieve for an org to only the items NOT in this election
public_positions_list = public_positions_list.exclude(
we_vote_id__in=we_vote_ids_for_all_positions_for_this_election)
else:
# Leave the position_list as is.
pass
elif positive_value_exists(google_civic_election_id):
# Please note that this option doesn't catch Vote Smart ratings, which are not
# linked by google_civic_election_id
public_positions_list = public_positions_list.filter(
google_civic_election_id=google_civic_election_id)
elif positive_value_exists(state_code):
public_positions_list = public_positions_list.filter(state_code__iexact=state_code)
                # And finally, make sure there is either a stance or some text commentary -- exclude entries with neither
public_positions_list = public_positions_list.exclude(
Q(stance__iexact=NO_STANCE) &
(Q(statement_text__isnull=True) | Q(statement_text__exact='')) &
(Q(statement_html__isnull=True) | Q(statement_html__exact=''))
)
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if retrieve_friends_positions:
try:
# Current voter visiting the site
current_voter_we_vote_id = ""
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
if results['voter_found']:
voter = results['voter']
current_voter_we_vote_id = voter.we_vote_id
# We need organization_we_vote_id, so look it up if only organization_id was passed in
if not organization_we_vote_id:
organization_manager = OrganizationManager()
organization_we_vote_id = organization_manager.fetch_we_vote_id_from_local_id(organization_id)
# Find the Voter id for the organization showing the positions. Organizations that sign in with
# their Twitter accounts get a Voter entry, with "voter.linked_organization_we_vote_id" containing
# the organizations we_vote_id.
results = voter_manager.retrieve_voter_from_organization_we_vote_id(organization_we_vote_id)
organization_voter_local_id = 0
organization_voter_we_vote_id = ""
if results['voter_found']:
organization_voter = results['voter']
organization_voter_local_id = organization_voter.id
organization_voter_we_vote_id = organization_voter.we_vote_id
# Is the viewer a friend of this organization? If NOT, then there is no need to proceed
voter_is_friend_of_organization = False
if positive_value_exists(current_voter_we_vote_id) and \
organization_voter_we_vote_id.lower() == current_voter_we_vote_id.lower():
# If the current viewer is looking at own entry, then show what should be shown to friends
voter_is_friend_of_organization = True
else:
# TODO DALE Check to see if current voter is in list of friends
voter_is_friend_of_organization = False # Temp hard coding
friends_positions_list = []
if voter_is_friend_of_organization:
# If here, then the viewer is a friend with the organization. Look up positions that
# are only shown to friends.
friends_positions_list = PositionForFriends.objects.order_by('-vote_smart_time_span',
'-google_civic_election_id')
# Get the entries saved by the organization's voter account
if positive_value_exists(organization_voter_local_id):
friends_positions_list = friends_positions_list.filter(
voter_id=organization_voter_local_id)
else:
friends_positions_list = friends_positions_list.filter(
voter_we_vote_id=organization_voter_we_vote_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY_STANCE" it means we want to not filter down the list
if stance_we_are_looking_for == SUPPORT or stance_we_are_looking_for == OPPOSE:
friends_positions_list = friends_positions_list.filter(
Q(stance=stance_we_are_looking_for) | Q(stance=PERCENT_RATING))
# | Q(stance=GRADE_RATING))
else:
friends_positions_list = friends_positions_list.filter(stance=stance_we_are_looking_for)
# Gather the ids for all positions in this election so we can figure out which positions
# relate to the election the voter is currently looking at, vs. for all other elections
public_only = False
we_vote_ids_for_all_positions_for_this_election = []
google_civic_election_id_local_scope = 0
if positive_value_exists(filter_for_voter) or positive_value_exists(filter_out_voter):
results = figure_out_google_civic_election_id_voter_is_watching(voter_device_id)
google_civic_election_id_local_scope = results['google_civic_election_id']
if positive_value_exists(google_civic_election_id_local_scope):
all_positions_for_this_election = self.retrieve_all_positions_for_election(
google_civic_election_id_local_scope, stance_we_are_looking_for, public_only)
for one_position in all_positions_for_this_election:
we_vote_ids_for_all_positions_for_this_election.append(one_position.we_vote_id)
# We can filter by only one of these
if positive_value_exists(filter_for_voter): # This is the default option
if positive_value_exists(google_civic_election_id_local_scope):
# Limit positions we can retrieve for an org to only the items in this election
friends_positions_list = friends_positions_list.filter(
we_vote_id__in=we_vote_ids_for_all_positions_for_this_election)
else:
# If no election is found for the voter, don't show any positions
friends_positions_list = []
elif positive_value_exists(filter_out_voter):
if positive_value_exists(google_civic_election_id_local_scope):
# Limit positions we can retrieve for an org to only the items NOT in this election
friends_positions_list = friends_positions_list.exclude(
we_vote_id__in=we_vote_ids_for_all_positions_for_this_election)
else:
# Leave the position_list as is.
pass
elif positive_value_exists(google_civic_election_id):
# Please note that this option doesn't catch Vote Smart ratings, which are not
# linked by google_civic_election_id
# We are only using this if google_civic_election_id was passed
# into retrieve_all_positions_for_organization
friends_positions_list = friends_positions_list.filter(
google_civic_election_id=google_civic_election_id)
elif positive_value_exists(state_code):
friends_positions_list = friends_positions_list.filter(state_code__iexact=state_code)
# And finally, make sure there is either a stance or text commentary -- exclude positions with neither
friends_positions_list = friends_positions_list.exclude(
Q(stance__iexact=NO_STANCE) &
(Q(statement_text__isnull=True) | Q(statement_text__exact='')) &
(Q(statement_html__isnull=True) | Q(statement_html__exact=''))
)
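# i.e. drop rows where stance is NO_STANCE and both statement_text and statement_html are empty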
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
# Merge public positions and "For friends" positions
public_positions_list = list(public_positions_list) # Force the query to run
# Flag all of these entries as "is_public_position = True"
revised_position_list = []
for one_position in public_positions_list:
one_position.is_public_position = True # Add this value
revised_position_list.append(one_position)
public_positions_list = revised_position_list
friends_positions_list = list(friends_positions_list) # Force the query to run
# Flag all of these entries as "is_public_position = False"
revised_position_list = []
for one_position in friends_positions_list:
one_position.is_public_position = False # Add this value
revised_position_list.append(one_position)
friends_positions_list = revised_position_list
position_list = public_positions_list + friends_positions_list
# Now filter out the positions that have a percent rating that doesn't match the stance_we_are_looking_for
if stance_we_are_looking_for == SUPPORT or stance_we_are_looking_for == OPPOSE:
revised_position_list = []
for one_position in position_list:
if stance_we_are_looking_for == SUPPORT:
if one_position.stance == PERCENT_RATING:
if one_position.is_support():
revised_position_list.append(one_position)
else:
revised_position_list.append(one_position)
elif stance_we_are_looking_for == OPPOSE:
if one_position.stance == PERCENT_RATING:
if one_position.is_oppose():
revised_position_list.append(one_position)
else:
revised_position_list.append(one_position)
position_list = revised_position_list
if len(position_list):
position_list_found = True
if position_list_found:
return position_list
else:
position_list = []
return position_list
def retrieve_all_positions_for_public_figure(self, public_figure_id, public_figure_we_vote_id,
stance_we_are_looking_for,
filter_for_voter, voter_device_id,
google_civic_election_id, state_code):
# TODO DALE Implement this: retrieve_all_positions_for_public_figure,
# model after retrieve_all_positions_for_organization
position_list = []
return position_list
def retrieve_all_positions_for_voter_simple(self, voter_id=0, voter_we_vote_id='', google_civic_election_id=0):
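"""
Retrieve both public (PositionEntered) and friends-only (PositionForFriends) positions for one voter,
and return them as simple dicts (ballot_item_we_vote_id, is_support, is_oppose, statement_text,
is_public_position). Positions without a candidate or measure we_vote_id are skipped.
:param voter_id:
:param voter_we_vote_id:
:param google_civic_election_id:
:return:
"""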
if not positive_value_exists(voter_id) and not positive_value_exists(voter_we_vote_id):
position_list = []
results = {
'status': 'MISSING_VOTER_ID',
'success': False,
'position_list_found': False,
'position_list': position_list,
}
return results
# Retrieve all positions for this voter -- if here we know that either voter_id or voter_we_vote_id exist
############################
# Retrieve public positions
try:
public_positions_list_query = PositionEntered.objects.all()
if positive_value_exists(voter_id):
public_positions_list_query = public_positions_list_query.filter(voter_id=voter_id)
elif positive_value_exists(voter_we_vote_id):
public_positions_list_query = public_positions_list_query.filter(voter_we_vote_id=voter_we_vote_id)
if positive_value_exists(google_civic_election_id):
public_positions_list_query = public_positions_list_query.filter(
google_civic_election_id=google_civic_election_id)
# Force the position for the most recent election to show up last
public_positions_list_query = public_positions_list_query.order_by('google_civic_election_id')
public_positions_list = list(public_positions_list_query) # Force the query to run
except Exception as e:
position_list = []
results = {
'status': 'VOTER_POSITION_ENTERED_SEARCH_FAILED',
'success': False,
'position_list_found': False,
'position_list': position_list,
}
return results
############################
# Retrieve positions meant for friends only
try:
friends_positions_list_query = PositionForFriends.objects.all()
if positive_value_exists(voter_id):
friends_positions_list_query = friends_positions_list_query.filter(voter_id=voter_id)
elif positive_value_exists(voter_we_vote_id):
friends_positions_list_query = friends_positions_list_query.filter(voter_we_vote_id=voter_we_vote_id)
if positive_value_exists(google_civic_election_id):
friends_positions_list_query = friends_positions_list_query.filter(
google_civic_election_id=google_civic_election_id)
# Force the position for the most recent election to show up last
friends_positions_list_query = friends_positions_list_query.order_by('google_civic_election_id')
friends_positions_list = list(friends_positions_list_query) # Force the query to run
except Exception as e:
position_list = []
results = {
'status': 'VOTER_POSITION_FOR_FRIENDS_SEARCH_FAILED',
'success': False,
'position_list_found': False,
'position_list': position_list,
}
return results
# Mark these positions as "is_public_position"
public_positions_list2 = []
for one_public_position in public_positions_list:
one_public_position.is_public_position = True
public_positions_list2.append(one_public_position)
# Mark these positions as NOT "is_public_position"
friends_positions_list2 = []
for one_friends_position in friends_positions_list:
one_friends_position.is_public_position = False
friends_positions_list2.append(one_friends_position)
# Merge public positions and "For friends" positions
position_list = public_positions_list2 + friends_positions_list2
position_list_found = len(position_list)
if position_list_found:
simple_position_list = []
for position in position_list:
# Make sure we have a ballot_item_we_vote_id
if positive_value_exists(position.candidate_campaign_we_vote_id):
ballot_item_we_vote_id = position.candidate_campaign_we_vote_id
elif positive_value_exists(position.contest_measure_we_vote_id):
ballot_item_we_vote_id = position.contest_measure_we_vote_id
else:
continue
one_position = {
'ballot_item_we_vote_id': ballot_item_we_vote_id,
'is_support': position.is_support(),
'is_oppose': position.is_oppose(),
'statement_text': position.statement_text,
'is_public_position': position.is_public_position,
}
simple_position_list.append(one_position)
results = {
'status': 'VOTER_POSITION_LIST_FOUND',
'success': True,
'position_list_found': True,
'position_list': simple_position_list,
}
return results
else:
position_list = []
results = {
'status': 'VOTER_POSITION_LIST_NOT_FOUND',
'success': True,
'position_list_found': False,
'position_list': position_list,
}
return results
def retrieve_all_positions_for_election(self, google_civic_election_id, stance_we_are_looking_for=ANY_STANCE,
public_only=False):
"""
Since we don't have a single way to ask the positions tables for only the positions related to a single
election, we need to look up the data in a round-about way. We get all candidates and measures in the election,
then return all positions that are about any of those candidates or measures.
:param google_civic_election_id:
:param stance_we_are_looking_for:
:param public_only: Do we care about public positions? Or friends-only positions?
:return:
"""
position_list = []
if stance_we_are_looking_for not \
in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING):
position_list = []
return position_list
# Note that one of the incoming options for stance_we_are_looking_for is 'ANY_STANCE'
# which means we want to return all stances
if not positive_value_exists(google_civic_election_id):
position_list = []
return position_list
# We aren't going to search directly on google_civic_election_id, but instead assemble a list of the items
# on the ballot and then retrieve positions relating to any of those ballot_items (candidates or measures)
# TODO DALE Running this code every time is not scalable. We should cache a link between positions and the
# elections that we can use to look up when we need the link.
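# One possible shape for that cache (sketch only -- PositionElectionLink is a
# hypothetical model, not something that exists in this codebase):
# class PositionElectionLink(models.Model):
#     position_we_vote_id = models.CharField(max_length=255, db_index=True)
#     google_civic_election_id = models.PositiveIntegerField(db_index=True)
# With such a table, finding positions for an election becomes a single indexed lookup
# instead of assembling candidate and measure lists on every call.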
# Candidate related positions
candidate_campaign_we_vote_ids = []
candidate_campaign_list_manager = CandidateCampaignListManager()
candidate_results = candidate_campaign_list_manager.retrieve_all_candidates_for_upcoming_election(
google_civic_election_id)
if candidate_results['candidate_list_found']:
candidate_list_light = candidate_results['candidate_list_light']
for one_candidate in candidate_list_light:
candidate_campaign_we_vote_ids.append(one_candidate['candidate_we_vote_id'])
# Measure related positions
contest_measure_we_vote_ids = []
contest_measure_list_manager = ContestMeasureList()
measure_results = contest_measure_list_manager.retrieve_all_measures_for_upcoming_election(
google_civic_election_id)
if measure_results['measure_list_found']:
measure_list_light = measure_results['measure_list_light']
for one_measure in measure_list_light:
contest_measure_we_vote_ids.append(one_measure['measure_we_vote_id'])
position_list_found = False
try:
if public_only:
# Only return public positions
position_list = PositionEntered.objects.order_by('date_entered')
# TODO DALE 2016-08-30 I believe this is out of date because I *think* we have public positions
# with a value in voter_id
# We are removing old entries from voters that should be private
# position_list = position_list.filter(
# Q(voter_id__isnull=True) |
# Q(voter_id__exact=0))
else:
# Only return PositionForFriends entries
position_list = PositionForFriends.objects.order_by('date_entered')
position_list = position_list.filter(
Q(candidate_campaign_we_vote_id__in=candidate_campaign_we_vote_ids) |
Q(contest_measure_we_vote_id__in=contest_measure_we_vote_ids))
# position_list = position_list.filter(contest_measure_we_vote_id=contest_measure_we_vote_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY" it means we want to not filter down the list
position_list = position_list.filter(stance=stance_we_are_looking_for)
if len(position_list):
position_list_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
if position_list_found:
return position_list
else:
position_list = []
return position_list
def remove_older_positions_for_each_org(self, position_list):
# If we have multiple positions for one org, we only want to show the most recent
organization_already_reviewed = []
organization_with_multiple_positions = []
newest_position_for_org = {} # Figure out the newest position per org that we should show
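# For example, after the loop below this might look like {'wv02org123': 2016, 'wv02org456': 2014}
# (illustrative values): the first four digits of the most recent vote_smart_time_span per org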
for one_position in position_list:
if one_position.organization_we_vote_id:
if one_position.organization_we_vote_id not in organization_already_reviewed:
organization_already_reviewed.append(one_position.organization_we_vote_id)
# Are we dealing with a time span (instead of google_civic_election_id)?
if positive_value_exists(one_position.vote_smart_time_span):
# Take the first four digits of one_position.vote_smart_time_span
first_four_digits = convert_to_int(one_position.vote_smart_time_span[:4])
# And figure out the newest position for each org
if one_position.organization_we_vote_id in newest_position_for_org:
# If we are here, it means we have seen this organization once already
if one_position.organization_we_vote_id not in organization_with_multiple_positions:
organization_with_multiple_positions.append(one_position.organization_we_vote_id)
# If this position is newer than the one already looked at, update newest_position_for_org
if first_four_digits > newest_position_for_org[one_position.organization_we_vote_id]:
newest_position_for_org[one_position.organization_we_vote_id] = first_four_digits
else:
newest_position_for_org[one_position.organization_we_vote_id] = first_four_digits
position_list_filtered = []
position_included_for_this_org = {}
for one_position in position_list:
if one_position.organization_we_vote_id in organization_with_multiple_positions:
first_four_digits = convert_to_int(one_position.vote_smart_time_span[:4])
if (newest_position_for_org[one_position.organization_we_vote_id] == first_four_digits) and \
(one_position.organization_we_vote_id not in position_included_for_this_org):
# If this position is the newest from among the organization's positions, include in results
position_list_filtered.append(one_position)
# Only add one position to position_list_filtered once
position_included_for_this_org[one_position.organization_we_vote_id] = True
else:
position_list_filtered.append(one_position)
return position_list_filtered
def retrieve_public_positions_count_for_candidate_campaign(self, candidate_campaign_id,
candidate_campaign_we_vote_id,
stance_we_are_looking_for):
if stance_we_are_looking_for not \
in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING):
return 0
# Note that one of the incoming options for stance_we_are_looking_for is 'ANY_STANCE'
# which means we want to return all stances
if not positive_value_exists(candidate_campaign_id) and not \
positive_value_exists(candidate_campaign_we_vote_id):
return 0
# Retrieve the support positions for this candidate_campaign_id
position_count = 0
try:
position_list = PositionEntered.objects.order_by('date_entered')
if positive_value_exists(candidate_campaign_id):
position_list = position_list.filter(candidate_campaign_id=candidate_campaign_id)
else:
position_list = position_list.filter(candidate_campaign_we_vote_id=candidate_campaign_we_vote_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY_STANCE" it means we want to not filter down the list
if stance_we_are_looking_for == SUPPORT:
position_list = position_list.filter(
Q(stance=stance_we_are_looking_for) | # Matches "is_support"
(Q(stance=PERCENT_RATING) & Q(vote_smart_rating__gte=66)) # Matches "is_positive_rating"
) # | Q(stance=GRADE_RATING))
elif stance_we_are_looking_for == OPPOSE:
position_list = position_list.filter(
Q(stance=stance_we_are_looking_for) | # Matches "is_oppose"
(Q(stance=PERCENT_RATING) & Q(vote_smart_rating__lte=33)) # Matches "is_negative_rating"
) # | Q(stance=GRADE_RATING))
else:
position_list = position_list.filter(stance=stance_we_are_looking_for)
# Limit to positions in the last x years - currently we are not limiting
# position_list = position_list.filter(election_id=election_id)
position_count = position_list.count()
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
return position_count
def retrieve_public_positions_count_for_contest_measure(self, contest_measure_id,
contest_measure_we_vote_id,
stance_we_are_looking_for):
if stance_we_are_looking_for not \
in (ANY_STANCE, SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING):
return 0
# Note that one of the incoming options for stance_we_are_looking_for is 'ANY_STANCE'
# which means we want to return all stances
if not positive_value_exists(contest_measure_id) and not \
positive_value_exists(contest_measure_we_vote_id):
return 0
# Retrieve the support positions for this contest_measure_id
position_count = 0
try:
position_list = PositionEntered.objects.order_by('date_entered')
if positive_value_exists(contest_measure_id):
position_list = position_list.filter(contest_measure_id=contest_measure_id)
else:
position_list = position_list.filter(contest_measure_we_vote_id=contest_measure_we_vote_id)
# SUPPORT, STILL_DECIDING, INFORMATION_ONLY, NO_STANCE, OPPOSE, PERCENT_RATING
if stance_we_are_looking_for != ANY_STANCE:
# If we passed in the stance "ANY" it means we want to not filter down the list
position_list = position_list.filter(stance=stance_we_are_looking_for)
# position_list = position_list.filter(election_id=election_id)
position_count = position_list.count()
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
return position_count
def retrieve_possible_duplicate_positions(self, google_civic_election_id, organization_we_vote_id,
candidate_we_vote_id, measure_we_vote_id,
we_vote_id_from_master=''):
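"""
Look in PositionEntered for entries in this election that might be duplicates of an incoming position:
same organization + candidate, or same organization + measure. Any entry whose we_vote_id matches
we_vote_id_from_master is ignored, since that is the position being imported.
:param google_civic_election_id:
:param organization_we_vote_id:
:param candidate_we_vote_id:
:param measure_we_vote_id:
:param we_vote_id_from_master:
:return:
"""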
position_list_objects = []
filters = []
position_list_found = False
try:
position_queryset = PositionEntered.objects.all()
position_queryset = position_queryset.filter(google_civic_election_id=google_civic_election_id)
# We don't look for office_we_vote_id because of the chance that locally we are using a
# different we_vote_id
# position_queryset = position_queryset.filter(contest_office_we_vote_id__iexact=office_we_vote_id)
# Ignore entries with we_vote_id coming in from master server
if positive_value_exists(we_vote_id_from_master):
position_queryset = position_queryset.filter(~Q(we_vote_id__iexact=we_vote_id_from_master))
# Situation 1 organization_we_vote_id + candidate_we_vote_id matches an entry already in the db
if positive_value_exists(organization_we_vote_id) and positive_value_exists(candidate_we_vote_id):
new_filter = (Q(organization_we_vote_id__iexact=organization_we_vote_id) &
Q(candidate_campaign_we_vote_id__iexact=candidate_we_vote_id))
filters.append(new_filter)
# Situation 2 organization_we_vote_id + measure_we_vote_id matches an entry already in the db
if positive_value_exists(organization_we_vote_id) and positive_value_exists(measure_we_vote_id):
new_filter = (Q(organization_we_vote_id__iexact=organization_we_vote_id) &
Q(contest_measure_we_vote_id__iexact=measure_we_vote_id))
filters.append(new_filter)
# Start the combined query with the first filter...
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
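# e.g. final_filters is now (org & candidate) | (org & measure) when both situations applied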
position_queryset = position_queryset.filter(final_filters)
position_list_objects = position_queryset
if len(position_list_objects):
position_list_found = True
status = 'DUPLICATE_POSITIONS_RETRIEVED'
success = True
else:
status = 'NO_DUPLICATE_POSITIONS_RETRIEVED'
success = True
except PositionEntered.DoesNotExist:
# No positions found. Not a problem.
status = 'NO_DUPLICATE_POSITIONS_FOUND_DoesNotExist'
position_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status = 'FAILED retrieve_possible_duplicate_positions ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'google_civic_election_id': google_civic_election_id,
'position_list_found': position_list_found,
'position_list': position_list_objects,
}
return results
class PositionEnteredManager(models.Model):
def __unicode__(self):
return "PositionEnteredManager"
def create_position_for_visibility_change(self, voter_id, office_we_vote_id, candidate_we_vote_id,
measure_we_vote_id, visibility_setting):
position_we_vote_id = ""
position_found = False
google_civic_election_id = 0
one_unique_ballot_item_variable_received = positive_value_exists(office_we_vote_id) or \
positive_value_exists(candidate_we_vote_id) or \
positive_value_exists(measure_we_vote_id)
if visibility_setting == FRIENDS_ONLY:
position_on_stage_starter = PositionForFriends
position_on_stage = PositionForFriends()
is_public_position = False
else:
position_on_stage_starter = PositionEntered
position_on_stage = PositionEntered()
is_public_position = True
if not voter_id \
or not one_unique_ballot_item_variable_received \
or visibility_setting not in (FRIENDS_ONLY, SHOW_PUBLIC):
status = "CREATE_POSITION_FOR_VISIBILITY_CHANGE-MISSING_REQUIRED_VARIABLE"
success = False
results = {
'success': success,
'status': status,
'position_we_vote_id': "",
'position': position_on_stage,
'position_found': position_found,
'is_public_position': is_public_position
}
return results
problem_with_duplicate = False
success = False
status = "CREATE_POSITION_FOR_VISIBILITY_CHANGE"
try:
# Check for duplicate in other table
position_we_vote_id = ""
organization_id = 0
organization_we_vote_id = ""
contest_office_id = 0
candidate_campaign_id = 0
contest_measure_id = 0
retrieve_position_for_friends = not is_public_position
voter_we_vote_id = ""
duplicate_results = self.retrieve_position(position_we_vote_id,
organization_id, organization_we_vote_id, voter_id,
contest_office_id, candidate_campaign_id, contest_measure_id,
retrieve_position_for_friends,
voter_we_vote_id,
office_we_vote_id, candidate_we_vote_id, measure_we_vote_id)
if duplicate_results['position_found']:
problem_with_duplicate = True
success = False
status = 'CREATE_POSITION_FOR_VISIBILITY_CHANGE-EXISTING_POSITION_FOUND'
except Exception as e:
problem_with_duplicate = True
success = False
status = 'CREATE_POSITION_FOR_VISIBILITY_CHANGE-EXISTING_POSITION_CHECK_FAILED'
if problem_with_duplicate:
results = {
'success': success,
'status': status,
'position_we_vote_id': position_we_vote_id,
'position': position_on_stage,
'position_found': position_found,
'is_public_position': is_public_position
}
return results
# Now that we've checked to see that there isn't an entry from the other table, create a new one
try:
# Create new
ballot_item_display_name = ""
speaker_display_name = ""
candidate_campaign_id = None
if candidate_we_vote_id:
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(candidate_we_vote_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
candidate_campaign_id = candidate_campaign.id
google_civic_election_id = candidate_campaign.google_civic_election_id
ballot_item_display_name = candidate_campaign.candidate_name
contest_measure_id = None
if measure_we_vote_id:
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(measure_we_vote_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
contest_measure_id = contest_measure.id
google_civic_election_id = contest_measure.google_civic_election_id
ballot_item_display_name = contest_measure.measure_title
contest_office_id = None
if office_we_vote_id:
contest_office_manager = ContestOfficeManager()
results = contest_office_manager.retrieve_contest_office_from_we_vote_id(office_we_vote_id)
if results['contest_office_found']:
contest_office = results['contest_office']
contest_office_id = contest_office.id
google_civic_election_id = contest_office.google_civic_election_id
ballot_item_display_name = contest_office.office_name
# In order to show a position publicly we need to tie the position to either organization_we_vote_id,
# public_figure_we_vote_id or candidate_we_vote_id. For now (2016-8-17) we assume an organization
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(voter_id)
organization_id = 0
organization_we_vote_id = ""
voter_we_vote_id = ""
if results['voter_found']:
voter = results['voter']
voter_we_vote_id = voter.we_vote_id
organization_we_vote_id = voter.linked_organization_we_vote_id
if positive_value_exists(organization_we_vote_id):
# Look up the organization_id
organization_manager = OrganizationManager()
organization_results = organization_manager.retrieve_organization_from_we_vote_id(
voter.linked_organization_we_vote_id)
if organization_results['organization_found']:
organization = organization_results['organization']
organization_id = organization.id
speaker_display_name = organization.organization_name
position_on_stage = position_on_stage_starter(
voter_id=voter_id,
voter_we_vote_id=voter_we_vote_id,
candidate_campaign_id=candidate_campaign_id,
candidate_campaign_we_vote_id=candidate_we_vote_id,
contest_measure_id=contest_measure_id,
contest_measure_we_vote_id=measure_we_vote_id,
contest_office_id=contest_office_id,
contest_office_we_vote_id=office_we_vote_id,
google_civic_election_id=google_civic_election_id,
organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
ballot_item_display_name=ballot_item_display_name,
speaker_display_name=speaker_display_name,
)
position_on_stage.save()
position_we_vote_id = position_on_stage.we_vote_id
position_found = True
success = True
status = 'CREATE_POSITION_FOR_VISIBILITY_CHANGE-NEW_POSITION_SAVED'
if positive_value_exists(organization_id) and positive_value_exists(organization_we_vote_id) \
and positive_value_exists(google_civic_election_id):
voter_guide_manager = VoterGuideManager()
# Make sure we have a voter guide so others can find it
if not voter_guide_manager.voter_guide_exists(organization_we_vote_id, google_civic_election_id):
voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
organization_we_vote_id, google_civic_election_id)
except Exception as e:
success = False
status = 'CREATE_POSITION_FOR_VISIBILITY_CHANGE-NEW_POSITION_COULD_NOT_BE_SAVED'
results = {
'success': success,
'status': status,
'position_we_vote_id': position_we_vote_id,
'position': position_on_stage,
'position_found': position_found,
'is_public_position': is_public_position
}
return results
def retrieve_organization_candidate_campaign_position(self, organization_id, candidate_campaign_id,
google_civic_election_id=False):
"""
Find a position based on the organization_id & candidate_campaign_id
:param organization_id:
:param candidate_campaign_id:
:param google_civic_election_id:
:return:
"""
organization_we_vote_id = ''
position_we_vote_id = ''
voter_id = 0
office_id = 0
contest_measure_id = 0
voter_we_vote_id = ''
contest_office_we_vote_id = ''
candidate_campaign_we_vote_id = ''
contest_measure_we_vote_id = ''
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id, contest_measure_we_vote_id,
google_civic_election_id)
def retrieve_organization_candidate_campaign_position_with_we_vote_id(self, organization_id,
candidate_campaign_we_vote_id,
google_civic_election_id=False):
"""
Find a position based on the organization_id & candidate_campaign_we_vote_id
:param organization_id:
:param candidate_campaign_we_vote_id:
:param google_civic_election_id:
:return:
"""
organization_we_vote_id = ''
position_we_vote_id = ''
voter_id = 0
office_id = 0
contest_measure_id = 0
voter_we_vote_id = ''
contest_office_we_vote_id = ''
candidate_campaign_id = 0
contest_measure_we_vote_id = ''
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id, contest_measure_we_vote_id,
google_civic_election_id)
def retrieve_organization_contest_measure_position(self, organization_id, contest_measure_id,
google_civic_election_id=False):
"""
Find a position based on the organization_id & contest_measure_id
:param organization_id:
:param contest_measure_id:
:param google_civic_election_id:
:return:
"""
organization_we_vote_id = ''
position_we_vote_id = ''
voter_id = 0
office_id = 0
candidate_campaign_id = 0
voter_we_vote_id = ''
contest_office_we_vote_id = ''
candidate_campaign_we_vote_id = ''
contest_measure_we_vote_id = ''
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id, contest_measure_we_vote_id,
google_civic_election_id)
def retrieve_organization_contest_measure_position_with_we_vote_id(self, organization_id,
contest_measure_we_vote_id,
google_civic_election_id=False):
"""
Find a position based on the organization_id & contest_measure_we_vote_id
:param organization_id:
:param contest_measure_we_vote_id:
:param google_civic_election_id:
:return:
"""
organization_we_vote_id = ''
position_we_vote_id = ''
voter_id = 0
office_id = 0
contest_measure_id = 0
voter_we_vote_id = ''
contest_office_we_vote_id = ''
candidate_campaign_id = 0
candidate_campaign_we_vote_id = ''
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id, contest_measure_we_vote_id,
google_civic_election_id)
def retrieve_voter_contest_office_position(self, voter_id, office_id):
organization_id = 0
organization_we_vote_id = ''
position_we_vote_id = ''
candidate_campaign_id = 0
contest_measure_id = 0
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id)
def retrieve_voter_contest_office_position_with_we_vote_id(self, voter_id, contest_office_we_vote_id):
organization_id = 0
organization_we_vote_id = ''
position_we_vote_id = ''
office_id = 0
candidate_campaign_id = 0
contest_measure_id = 0
voter_we_vote_id = ""
candidate_campaign_we_vote_id = ''
contest_measure_we_vote_id = ''
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id, contest_measure_we_vote_id
)
def retrieve_voter_candidate_campaign_position(self, voter_id, candidate_campaign_id):
organization_id = 0
organization_we_vote_id = ''
position_we_vote_id = ''
office_id = 0
contest_measure_id = 0
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id)
def retrieve_voter_candidate_campaign_position_with_we_vote_id(self, voter_id, candidate_campaign_we_vote_id):
organization_id = 0
organization_we_vote_id = ''
position_we_vote_id = ''
office_id = 0
candidate_campaign_id = 0
contest_measure_id = 0
voter_we_vote_id = ''
contest_office_we_vote_id = ''
contest_measure_we_vote_id = ''
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id, contest_measure_we_vote_id
)
def retrieve_voter_contest_measure_position(self, voter_id, contest_measure_id):
organization_id = 0
organization_we_vote_id = ''
position_we_vote_id = ''
office_id = 0
candidate_campaign_id = 0
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id)
def retrieve_voter_contest_measure_position_with_we_vote_id(self, voter_id, contest_measure_we_vote_id):
organization_id = 0
organization_we_vote_id = ''
position_we_vote_id = ''
office_id = 0
candidate_campaign_id = 0
contest_measure_id = 0
voter_we_vote_id = ''
contest_office_we_vote_id = ''
candidate_campaign_we_vote_id = ''
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id, contest_measure_we_vote_id
)
def retrieve_position_from_we_vote_id(self, position_we_vote_id):
organization_id = 0
organization_we_vote_id = ''
voter_id = 0
office_id = 0
candidate_campaign_id = 0
contest_measure_id = 0
position_entered_manager = PositionEnteredManager()
return position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
office_id, candidate_campaign_id, contest_measure_id)
def retrieve_position_table_unknown(self, position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
contest_office_id, candidate_campaign_id, contest_measure_id,
voter_we_vote_id='', contest_office_we_vote_id='',
candidate_campaign_we_vote_id='', contest_measure_we_vote_id='',
google_civic_election_id=False, vote_smart_time_span=False):
# Check public positions first
retrieve_position_for_friends = False
results = self.retrieve_position(position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
contest_office_id, candidate_campaign_id, contest_measure_id,
retrieve_position_for_friends,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id,
contest_measure_we_vote_id,
google_civic_election_id, vote_smart_time_span)
if results['position_found']:
return results
# If a public position wasn't found, now check for private position
retrieve_position_for_friends = True
return self.retrieve_position(position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
contest_office_id, candidate_campaign_id, contest_measure_id,
retrieve_position_for_friends,
voter_we_vote_id, contest_office_we_vote_id, candidate_campaign_we_vote_id,
contest_measure_we_vote_id,
google_civic_election_id, vote_smart_time_span)
def retrieve_position(self, position_we_vote_id, organization_id, organization_we_vote_id, voter_id,
contest_office_id, candidate_campaign_id, contest_measure_id,
retrieve_position_for_friends=False,
voter_we_vote_id='', contest_office_we_vote_id='', candidate_campaign_we_vote_id='',
contest_measure_we_vote_id='',
google_civic_election_id=False, vote_smart_time_span=False):
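"""
Retrieve one position from either PositionEntered (public) or PositionForFriends, depending on
retrieve_position_for_friends. Identifier combinations are tried in priority order: position_we_vote_id
first, then organization + ballot item (optionally scoped by google_civic_election_id or
vote_smart_time_span), then voter + ballot item.
:return: results dict including 'position_found' and the position itself
"""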
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
position_found = False
if retrieve_position_for_friends:
position_on_stage_starter = PositionForFriends
position_on_stage = PositionForFriends()
else:
position_on_stage_starter = PositionEntered
position_on_stage = PositionEntered()
success = False
is_public_position = None
if positive_value_exists(organization_we_vote_id) and not positive_value_exists(organization_id):
# Look up the organization_id
organization_manager = OrganizationManager()
organization_id = organization_manager.fetch_organization_id(organization_we_vote_id)
if positive_value_exists(voter_we_vote_id) and not positive_value_exists(voter_id):
# Look up the voter_id
voter_manager = VoterManager()
voter_id = voter_manager.fetch_local_id_from_we_vote_id(voter_we_vote_id)
try:
if positive_value_exists(position_we_vote_id):
status = "RETRIEVE_POSITION_FOUND_WITH_WE_VOTE_ID"
position_on_stage = position_on_stage_starter.objects.get(we_vote_id=position_we_vote_id)
position_found = True
success = True
# ###############################
# Organization
elif positive_value_exists(organization_id) and positive_value_exists(contest_office_id):
if positive_value_exists(google_civic_election_id):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_OFFICE_AND_ELECTION"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_office_id=contest_office_id,
google_civic_election_id=google_civic_election_id)
# If still here, we found an existing position
position_found = True
success = True
elif positive_value_exists(vote_smart_time_span):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_OFFICE_AND_VOTE_SMART_TIME_SPAN"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_office_id=contest_office_id,
vote_smart_time_span__iexact=vote_smart_time_span)
# If still here, we found an existing position
position_found = True
success = True
else:
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_AND_OFFICE"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_office_id=contest_office_id)
# If still here, we found an existing position
position_found = True
success = True
elif positive_value_exists(organization_id) and positive_value_exists(candidate_campaign_id):
if positive_value_exists(google_civic_election_id):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_CANDIDATE_AND_ELECTION"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, candidate_campaign_id=candidate_campaign_id,
google_civic_election_id=google_civic_election_id)
# If still here, we found an existing position
position_found = True
success = True
elif positive_value_exists(vote_smart_time_span):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_CANDIDATE_AND_VOTE_SMART_TIME_SPAN"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, candidate_campaign_id=candidate_campaign_id,
vote_smart_time_span__iexact=vote_smart_time_span)
# If still here, we found an existing position
position_found = True
success = True
else:
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_AND_CANDIDATE"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, candidate_campaign_id=candidate_campaign_id)
# If still here, we found an existing position
position_found = True
success = True
elif positive_value_exists(organization_id) and positive_value_exists(candidate_campaign_we_vote_id):
if positive_value_exists(google_civic_election_id):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_CANDIDATE_WE_VOTE_ID_AND_ELECTION"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
google_civic_election_id=google_civic_election_id)
# If still here, we found an existing position
position_found = True
success = True
elif positive_value_exists(vote_smart_time_span):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_CANDIDATE_WE_VOTE_ID_AND_VOTE_SMART_TIME_SPAN"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id,
candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
vote_smart_time_span__iexact=vote_smart_time_span)
# If still here, we found an existing position
position_found = True
success = True
else:
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_AND_CANDIDATE_WE_VOTE_ID"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, candidate_campaign_we_vote_id=candidate_campaign_we_vote_id)
# If still here, we found an existing position
position_found = True
success = True
elif positive_value_exists(organization_id) and positive_value_exists(contest_measure_id):
if positive_value_exists(google_civic_election_id):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_MEASURE_AND_ELECTION"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_measure_id=contest_measure_id,
google_civic_election_id=google_civic_election_id)
# If still here, we found an existing position
position_found = True
success = True
elif positive_value_exists(vote_smart_time_span):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_MEASURE_AND_VOTE_SMART_TIME_SPAN"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_measure_id=contest_measure_id,
vote_smart_time_span__iexact=vote_smart_time_span)
# If still here, we found an existing position
position_found = True
success = True
else:
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_AND_MEASURE"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_measure_id=contest_measure_id)
position_found = True
success = True
elif positive_value_exists(organization_id) and positive_value_exists(contest_measure_we_vote_id):
if positive_value_exists(google_civic_election_id):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_MEASURE_WE_VOTE_ID_AND_ELECTION"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_measure_we_vote_id=contest_measure_we_vote_id,
google_civic_election_id=google_civic_election_id)
# If still here, we found an existing position
position_found = True
success = True
elif positive_value_exists(vote_smart_time_span):
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_MEASURE_WE_VOTE_ID_AND_VOTE_SMART_TIME_SPAN"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_measure_we_vote_id=contest_measure_we_vote_id,
vote_smart_time_span__iexact=vote_smart_time_span)
# If still here, we found an existing position
position_found = True
success = True
else:
status = "RETRIEVE_POSITION_FOUND_WITH_ORG_AND_MEASURE_WE_VOTE_ID"
position_on_stage = position_on_stage_starter.objects.get(
organization_id=organization_id, contest_measure_we_vote_id=contest_measure_we_vote_id)
position_found = True
success = True
# ###############################
# Voter
elif positive_value_exists(voter_id) and positive_value_exists(contest_office_id):
status = "RETRIEVE_POSITION_FOUND_WITH_VOTER_AND_OFFICE"
position_on_stage = position_on_stage_starter.objects.get(
voter_id=voter_id, contest_office_id=contest_office_id)
position_found = True
success = True
elif positive_value_exists(voter_id) and positive_value_exists(contest_office_we_vote_id):
status = "RETRIEVE_POSITION_FOUND_WITH_VOTER_AND_OFFICE_WE_VOTE_ID"
position_on_stage = position_on_stage_starter.objects.get(
voter_id=voter_id, contest_office_we_vote_id=contest_office_we_vote_id)
position_found = True
success = True
elif positive_value_exists(voter_id) and positive_value_exists(candidate_campaign_id):
status = "RETRIEVE_POSITION_FOUND_WITH_VOTER_AND_CANDIDATE"
position_on_stage = position_on_stage_starter.objects.get(
voter_id=voter_id, candidate_campaign_id=candidate_campaign_id)
position_found = True
success = True
elif positive_value_exists(voter_id) and positive_value_exists(candidate_campaign_we_vote_id):
status = "RETRIEVE_POSITION_FOUND_WITH_VOTER_AND_CANDIDATE_WE_VOTE_ID"
position_on_stage = position_on_stage_starter.objects.get(
voter_id=voter_id, candidate_campaign_we_vote_id=candidate_campaign_we_vote_id)
position_found = True
success = True
elif positive_value_exists(voter_id) and positive_value_exists(contest_measure_id):
status = "RETRIEVE_POSITION_FOUND_WITH_VOTER_AND_MEASURE"
position_on_stage = position_on_stage_starter.objects.get(
voter_id=voter_id, contest_measure_id=contest_measure_id)
position_found = True
success = True
elif positive_value_exists(voter_id) and positive_value_exists(contest_measure_we_vote_id):
status = "RETRIEVE_POSITION_FOUND_WITH_VOTER_AND_MEASURE_WE_VOTE_ID"
position_on_stage = position_on_stage_starter.objects.get(
voter_id=voter_id, contest_measure_we_vote_id=contest_measure_we_vote_id)
position_found = True
success = True
else:
status = "RETRIEVE_POSITION_INSUFFICIENT_VARIABLES"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status = "RETRIEVE_POSITION_MULTIPLE_FOUND"
if retrieve_position_for_friends:
position_on_stage = PositionForFriends()
else:
position_on_stage = PositionEntered()
except ObjectDoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status = "RETRIEVE_POSITION_NONE_FOUND"
is_public_position = None
if retrieve_position_for_friends:
position_on_stage = PositionForFriends()
else:
position_on_stage = PositionEntered()
if success:
if retrieve_position_for_friends:
is_public_position = False
position_on_stage.is_public_position = is_public_position
else:
is_public_position = True
position_on_stage.is_public_position = is_public_position
results = {
'success': success,
'status': status,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'position_found': position_found,
'position': position_on_stage,
'is_support': position_on_stage.is_support(),
'is_positive_rating': position_on_stage.is_positive_rating(),
'is_support_or_positive_rating': position_on_stage.is_support_or_positive_rating(),
'is_oppose': position_on_stage.is_oppose(),
'is_negative_rating': position_on_stage.is_negative_rating(),
'is_oppose_or_negative_rating': position_on_stage.is_oppose_or_negative_rating(),
'is_no_stance': position_on_stage.is_no_stance(),
'is_information_only': position_on_stage.is_information_only(),
'is_still_deciding': position_on_stage.is_still_deciding(),
'is_public_position': is_public_position,
'date_last_changed': position_on_stage.date_last_changed,
'date_entered': position_on_stage.date_entered,
'google_civic_election_id': google_civic_election_id,
}
return results
def transfer_to_public_position(self, existing_position):
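"""
Move a friends-only position into the public PositionEntered table. Heals missing organization and
voter identifiers first, then delegates the copy-and-delete to switch_position_visibility.
:param existing_position: a PositionForFriends entry
:return:
"""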
# Check to make sure existing_position comes from PositionForFriends
if not existing_position._meta.object_name == "PositionForFriends":
results = {
'success': False,
'status': "SWITCH_TO_PUBLIC_POSITION_SUCCESS-NOT_PositionForFriends",
'position_copied': False,
'position_deleted': False,
'position': PositionEntered(),
'is_public_position': None,
}
return results
status = ""
# In order to show a position publicly we need to tie the position to either organization_we_vote_id,
# public_figure_we_vote_id or candidate_we_vote_id. For now (2016-8-17) we use only the organization
# Heal data: Make sure we have both org id and org we_vote_id
organization_we_vote_id_missing = positive_value_exists(existing_position.organization_id) \
and not positive_value_exists(existing_position.organization_we_vote_id)
organization_id_missing = positive_value_exists(existing_position.organization_we_vote_id) \
and not positive_value_exists(existing_position.organization_id)
if organization_id_missing or organization_we_vote_id_missing:
organization_manager = OrganizationManager()
# Heal data: Make sure we have both org_id and org_we_vote_id
if organization_id_missing:
existing_position.organization_id = organization_manager.fetch_organization_id(
existing_position.organization_we_vote_id)
elif organization_we_vote_id_missing:
existing_position.organization_we_vote_id = organization_manager.fetch_we_vote_id_from_local_id(
existing_position.organization_id)
# Heal data: Make sure we have both voter_id and voter_we_vote_id
voter_id_missing = positive_value_exists(existing_position.voter_we_vote_id) \
and not positive_value_exists(existing_position.voter_id)
voter_we_vote_id_missing = positive_value_exists(existing_position.voter_id) \
and not positive_value_exists(existing_position.voter_we_vote_id)
if voter_id_missing:
existing_position.voter_id = fetch_voter_id_from_voter_we_vote_id(existing_position.voter_we_vote_id)
elif voter_we_vote_id_missing:
existing_position.voter_we_vote_id = fetch_voter_we_vote_id_from_voter_id(existing_position.voter_id)
# Is there any organization data saved with this position yet?
organization_link_missing = not positive_value_exists(existing_position.organization_we_vote_id)
if organization_link_missing:
# If here, there isn't any organization information stored with this position. We need to see if
# an organization exists that is linked to this voter, and if so, heal the data
voter_manager = VoterManager()
# Look up the voter who owns this position
if positive_value_exists(existing_position.voter_we_vote_id):
voter_owner_results = voter_manager.retrieve_voter_by_we_vote_id(existing_position.voter_we_vote_id)
if voter_owner_results['voter_found']:
voter_owner = voter_owner_results['voter']
organization_manager = OrganizationManager()
if positive_value_exists(voter_owner.linked_organization_we_vote_id):
# This voter is linked to an org, so we can bring that data over to save in this position
existing_position.organization_we_vote_id = voter_owner.linked_organization_we_vote_id
existing_position.organization_id = organization_manager.fetch_organization_id(
existing_position.organization_we_vote_id)
else:
# If here, we need to do some looking to see if an org exists that matches this voter
status += "POSITION_SWITCH_TO_PUBLIC_POSITION-LOOK_FOR_ORG"
pass
else:
status += "POSITION_SWITCH_TO_PUBLIC_POSITION-VOTER_WE_VOTE_ID_NOT_FOUND"
# Verify data: The position may have both an org and a voter stored, but they might not match
# ??? If in doubt, give the position to the organization
# The position could have been voter friends-only, so we may not have *either* org_id or org_we_vote_id
# Once the voter pushes it live, we need to make sure an organization exists for them
# voter_manager = VoterManager()
# ######################
# CASE 1: It was a friends-only position, and we need to create an org to push it public with
# if positive_value_exists(existing_position.voter_we_vote_id):
# organization_missing = not positive_value_exists(existing_position.organization_id) \
# and not positive_value_exists(existing_position.organization_we_vote_id)
# if organization_missing:
# pass
# Check voter record for linked org
# If voter record has link to org, fill the position with the org ids
#
# If no linked_org in voter, check whether the voter's twitter handle is already being used by an existing org
#
# If the twitter handle is not used by an existing org, create a new org
#
#
# Check that the org isn't already linked to another voter
# If not linked,
# # Heal the data
# results = voter_manager.retrieve_voter_by_we_vote_id(existing_position.voter_we_vote_id)
# if results['voter_found']:
# voter = results['voter']
# # We found this voter from the existing position. Now we must make sure that the voter and
# # organization specified by the position all match
# if positive_value_exists(voter.linked_organization_we_vote_id) and
# voter.linked_organization_we_vote_id == existing_position:
# if organization_id_missing:
# existing_position.organization_we_vote_id = voter.linked_organization_we_vote_id
# # Look up the organization_id
# existing_position.organization_id = organization_manager.fetch_organization_id(
# voter.linked_organization_we_vote_id)
# ######################
# CASE 2: It is an org position, and we need to tie it to a voter
# ######################
# Note: We make sure a voter guide exists for this election and organization, in switch_position_visibility
switch_to_public_position = True
switch_position_visibility_results = self.switch_position_visibility(
existing_position, switch_to_public_position)
results = {
'success': switch_position_visibility_results['success'],
'status': status + switch_position_visibility_results['status'],
'position_copied': switch_position_visibility_results['position_copied'],
'position_deleted': switch_position_visibility_results['position_deleted'],
'position': switch_position_visibility_results['position'],
'is_public_position': switch_position_visibility_results['is_public_position'],
}
return results
def transfer_to_friends_only_position(self, existing_position):
# Check to make sure existing_position comes from PositionEntered
if existing_position._meta.object_name != "PositionEntered":
results = {
'success': False,
'status': "SWITCH_TO_FRIENDS_ONLY_POSITION_SUCCESS-NOT_PositionEntered",
'position_copied': False,
'position_deleted': False,
'position': PositionForFriends(),
'is_public_position': None,
}
return results
switch_to_public_position = False
return self.switch_position_visibility(existing_position, switch_to_public_position)
def switch_position_visibility(self, existing_position, switch_to_public_position):
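"""
Copy existing_position into the other table (PositionEntered if switch_to_public_position, otherwise
PositionForFriends), keeping the same we_vote_id and field values, then delete the original entry.
For public positions we also make sure a voter guide exists for the organization and election.
:param existing_position:
:param switch_to_public_position:
:return:
"""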
# We assume one does NOT exist in the other table
position_deleted = False
if switch_to_public_position:
new_position_starter = PositionEntered
new_position = PositionEntered() # For errors
else:
new_position_starter = PositionForFriends
new_position = PositionForFriends()
try:
# Make sure a google_civic_election_id is stored
if not positive_value_exists(existing_position.google_civic_election_id):
# We want to retrieve the google_civic_election_id from the ballot item object
if positive_value_exists(existing_position.candidate_campaign_we_vote_id):
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(
existing_position.candidate_campaign_we_vote_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
existing_position.google_civic_election_id = \
candidate_campaign.google_civic_election_id
elif positive_value_exists(existing_position.contest_measure_we_vote_id):
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(
existing_position.contest_measure_we_vote_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
existing_position.google_civic_election_id = contest_measure.google_civic_election_id
elif positive_value_exists(existing_position.contest_office_we_vote_id):
contest_office_manager = ContestOfficeManager()
results = contest_office_manager.retrieve_contest_office_from_we_vote_id(
existing_position.contest_office_we_vote_id)
if results['contest_office_found']:
contest_office = results['contest_office']
existing_position.google_civic_election_id = contest_office.google_civic_election_id
new_position = new_position_starter.objects.create(
we_vote_id=existing_position.we_vote_id,
date_entered=existing_position.date_entered,
date_last_changed=existing_position.date_last_changed,
organization_id=existing_position.organization_id,
organization_we_vote_id=existing_position.organization_we_vote_id,
voter_we_vote_id=existing_position.voter_we_vote_id,
voter_id=existing_position.voter_id,
google_civic_election_id=existing_position.google_civic_election_id,
google_civic_candidate_name=existing_position.google_civic_candidate_name,
tweet_source_id=existing_position.tweet_source_id,
state_code=existing_position.state_code,
ballot_item_display_name=existing_position.ballot_item_display_name,
ballot_item_image_url_https=existing_position.ballot_item_image_url_https,
ballot_item_twitter_handle=existing_position.ballot_item_twitter_handle,
contest_office_we_vote_id=existing_position.contest_office_we_vote_id,
contest_office_id=existing_position.contest_office_id,
candidate_campaign_we_vote_id=existing_position.candidate_campaign_we_vote_id,
candidate_campaign_id=existing_position.candidate_campaign_id,
politician_we_vote_id=existing_position.politician_we_vote_id,
politician_id=existing_position.politician_id,
contest_measure_we_vote_id=existing_position.contest_measure_we_vote_id,
contest_measure_id=existing_position.contest_measure_id,
google_civic_measure_title=existing_position.google_civic_measure_title,
stance=existing_position.stance,
statement_text=existing_position.statement_text,
statement_html=existing_position.statement_html,
more_info_url=existing_position.more_info_url,
from_scraper=existing_position.from_scraper,
organization_certified=existing_position.organization_certified,
volunteer_certified=existing_position.volunteer_certified,
twitter_user_entered_position_id=existing_position.twitter_user_entered_position_id,
voter_entering_position_id=existing_position.voter_entering_position_id,
public_figure_we_vote_id=existing_position.public_figure_we_vote_id,
vote_smart_time_span=existing_position.vote_smart_time_span,
vote_smart_rating_id=existing_position.vote_smart_rating_id,
vote_smart_rating=existing_position.vote_smart_rating,
vote_smart_rating_name=existing_position.vote_smart_rating_name,
speaker_display_name=existing_position.speaker_display_name,
speaker_image_url_https=existing_position.speaker_image_url_https,
speaker_twitter_handle=existing_position.speaker_twitter_handle,
)
status = 'SWITCH_POSITION_VISIBILITY_SUCCESS'
position_copied = True
success = True
if switch_to_public_position:
is_public_position = True
if positive_value_exists(existing_position.organization_we_vote_id) \
and positive_value_exists(existing_position.google_civic_election_id):
voter_guide_manager = VoterGuideManager()
# Make sure we have a voter guide so others can find this organization's positions
if not voter_guide_manager.voter_guide_exists(existing_position.organization_we_vote_id,
existing_position.google_civic_election_id):
voter_guide_manager.update_or_create_organization_voter_guide_by_election_id(
existing_position.organization_we_vote_id, existing_position.google_civic_election_id)
else:
is_public_position = False
except Exception as e:
status = 'SWITCH_POSITION_VISIBILITY_FAILED'
position_copied = False
success = False
# Make sure new_position exists for the results dict below, and report the
# visibility the position still has, since the copy did not happen
new_position = PositionEntered() if switch_to_public_position else PositionForFriends()
is_public_position = not switch_to_public_position
if position_copied:
# If here, we successfully copied the position and now we need to delete the old one
try:
existing_position.delete()
position_deleted = True
except Exception as e:
status = 'SWITCH_POSITION_VISIBILITY_FAILED-UNABLE_TO_DELETE'
position_deleted = False
success = False
results = {
'success': success,
'status': status,
'position_copied': position_copied,
'position_deleted': position_deleted,
'position': new_position,
'is_public_position': is_public_position,
}
return results
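# Illustrative sketch (comments only, not executed): assuming the enclosing
# method is switch_position_visibility(existing_position, switch_to_public_position),
# as its status strings suggest, switching a friends-only position to public
# copies the row into PositionEntered and deletes the PositionForFriends original:
#
#   position_entered_manager = PositionEnteredManager()
#   switch_results = position_entered_manager.switch_position_visibility(
#       existing_position, switch_to_public_position=True)
#   if switch_results['success']:
#       public_position = switch_results['position']  # now lives in PositionEntered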
def merge_into_public_position(self, position_to_keep):
status = "MERGE_INTO_PUBLIC_POSITION "
# Check to see if there is an equivalent position in the PositionForFriends table
position_we_vote_id = ""
retrieve_position_for_friends = True
ballot_item_identifier_found = positive_value_exists(position_to_keep.contest_office_id) \
or positive_value_exists(position_to_keep.candidate_campaign_id) \
or positive_value_exists(position_to_keep.contest_measure_id) \
or positive_value_exists(position_to_keep.contest_office_we_vote_id) \
or positive_value_exists(position_to_keep.candidate_campaign_we_vote_id) \
or positive_value_exists(position_to_keep.contest_measure_we_vote_id)
if ballot_item_identifier_found:
google_civic_election_id = 0 # Not necessary if there is ballot_item
else:
google_civic_election_id = position_to_keep.google_civic_election_id
results = self.retrieve_position(position_we_vote_id,
position_to_keep.organization_id, position_to_keep.organization_we_vote_id,
position_to_keep.voter_id,
position_to_keep.contest_office_id, position_to_keep.candidate_campaign_id,
position_to_keep.contest_measure_id,
retrieve_position_for_friends,
position_to_keep.voter_we_vote_id, position_to_keep.contest_office_we_vote_id,
position_to_keep.candidate_campaign_we_vote_id,
position_to_keep.contest_measure_we_vote_id,
google_civic_election_id,
position_to_keep.vote_smart_time_span)
if results['position_found']:
dead_position = results['position']
else:
results = {
'success': True,
'status': "MERGE_INTO_PUBLIC_POSITION-NO_NEED",
'position_merged': False,
'position_deleted': False,
'is_public_position': True,
}
return results
merge_position_visibility_results = self.merge_position_visibility(
position_to_keep, dead_position)
results = {
'success': merge_position_visibility_results['success'],
'status': status + merge_position_visibility_results['status'],
'position_copied': merge_position_visibility_results['position_copied'],
'position_deleted': merge_position_visibility_results['position_deleted'],
'position': merge_position_visibility_results['position'],
'is_public_position': merge_position_visibility_results['is_public_position'],
}
return results
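# Illustrative sketch: merge_into_public_position keeps the public row. If a
# friends-only duplicate for the same speaker and ballot item exists, its
# statement/url fields are merged in (only where the kept row is missing data)
# and the duplicate is deleted. public_position is assumed to be an existing
# PositionEntered instance:
#
#   merge_results = position_entered_manager.merge_into_public_position(public_position)
#   if merge_results['position_deleted']:
#       pass  # the PositionForFriends duplicate was removed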
def merge_into_friends_only_position(self, position_to_keep):
status = "MERGE_INTO_FRIENDS_ONLY_POSITION "
# Check to see if there is an equivalent position in the PositionEntered table
position_we_vote_id = ""
retrieve_position_for_friends = False
ballot_item_identifier_found = positive_value_exists(position_to_keep.contest_office_id) \
or positive_value_exists(position_to_keep.candidate_campaign_id) \
or positive_value_exists(position_to_keep.contest_measure_id) \
or positive_value_exists(position_to_keep.contest_office_we_vote_id) \
or positive_value_exists(position_to_keep.candidate_campaign_we_vote_id) \
or positive_value_exists(position_to_keep.contest_measure_we_vote_id)
if ballot_item_identifier_found:
google_civic_election_id = 0 # Not necessary if there is ballot_item
else:
google_civic_election_id = position_to_keep.google_civic_election_id
results = self.retrieve_position(position_we_vote_id,
position_to_keep.organization_id, position_to_keep.organization_we_vote_id,
position_to_keep.voter_id,
position_to_keep.contest_office_id, position_to_keep.candidate_campaign_id,
position_to_keep.contest_measure_id,
retrieve_position_for_friends,
position_to_keep.voter_we_vote_id, position_to_keep.contest_office_we_vote_id,
position_to_keep.candidate_campaign_we_vote_id,
position_to_keep.contest_measure_we_vote_id,
google_civic_election_id,
position_to_keep.vote_smart_time_span)
if results['position_found']:
dead_position = results['position']
else:
results = {
'success': True,
'status': "MERGE_INTO_FRIENDS_ONLY_POSITION-NO_NEED",
'position_merged': False,
'position_deleted': False,
'is_public_position': False,  # The kept position is friends-only
}
return results
merge_position_visibility_results = self.merge_position_visibility(
position_to_keep, dead_position)
results = {
'success': merge_position_visibility_results['success'],
'status': status + merge_position_visibility_results['status'],
'position_copied': merge_position_visibility_results['position_copied'],
'position_deleted': merge_position_visibility_results['position_deleted'],
'position': merge_position_visibility_results['position'],
'is_public_position': merge_position_visibility_results['is_public_position'],
}
return results
def merge_position_visibility(self, position_to_keep, dead_position):
# We want to see if dead_position has any values to save before we delete it.
data_transferred = False
if not positive_value_exists(position_to_keep.more_info_url):
if positive_value_exists(dead_position.more_info_url):
position_to_keep.more_info_url = dead_position.more_info_url
data_transferred = True
if not positive_value_exists(position_to_keep.statement_text):
if positive_value_exists(dead_position.statement_text):
position_to_keep.statement_text = dead_position.statement_text
data_transferred = True
if not positive_value_exists(position_to_keep.statement_html):
if positive_value_exists(dead_position.statement_html):
position_to_keep.statement_html = dead_position.statement_html
data_transferred = True
if data_transferred:
status = "MERGE_POSITION_VISIBILITY-DATA_TRANSFERRED"
else:
status = "MERGE_POSITION_VISIBILITY-DATA_NOT_TRANSFERRED"
# Persist any values transferred above (they were only set in memory), then
# delete the dead_position
try:
if data_transferred:
position_to_keep.save()
dead_position.delete()
position_deleted = True
success = True
except Exception as e:
status = 'MERGE_POSITION_VISIBILITY_FAILED-UNABLE_TO_DELETE'
position_deleted = False
success = False
results = {
'success': success,
'status': status,
'position': position_to_keep,
'position_deleted': position_deleted,
'position_data_transferred': data_transferred,
}
return results
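# Illustrative note: merge_position_visibility copies only more_info_url,
# statement_text, and statement_html, and only when position_to_keep is missing
# the value; position_to_keep always wins when both rows have data. Sketch,
# with two hypothetical position rows:
#
#   results = position_entered_manager.merge_position_visibility(keep_row, dead_row)
#   results['position_data_transferred']  # True when any field was copied over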
def toggle_on_voter_support_for_candidate_campaign(self, voter_id, candidate_campaign_id):
stance = SUPPORT
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_candidate_campaign(
voter_id, candidate_campaign_id, stance)
def toggle_off_voter_support_for_candidate_campaign(self, voter_id, candidate_campaign_id):
stance = NO_STANCE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_candidate_campaign(
voter_id, candidate_campaign_id, stance)
def toggle_on_voter_oppose_for_candidate_campaign(self, voter_id, candidate_campaign_id):
stance = OPPOSE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_candidate_campaign(
voter_id, candidate_campaign_id, stance)
def toggle_off_voter_oppose_for_candidate_campaign(self, voter_id, candidate_campaign_id):
stance = NO_STANCE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_candidate_campaign(
voter_id, candidate_campaign_id, stance)
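# Illustrative sketch: the four wrappers above all funnel into
# toggle_on_voter_position_for_candidate_campaign with a different stance
# constant; "toggling off" stores NO_STANCE rather than deleting the row.
# The ids below are hypothetical, and the contest-measure wrappers further
# down behave the same way for measures:
#
#   manager = PositionEnteredManager()
#   manager.toggle_on_voter_support_for_candidate_campaign(123, 456)   # stance -> SUPPORT
#   manager.toggle_off_voter_support_for_candidate_campaign(123, 456)  # stance -> NO_STANCE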
def toggle_on_voter_position_for_candidate_campaign(self, voter_id, candidate_campaign_id, stance):
# Does a position from this voter already exist?
position_entered_manager = PositionEnteredManager()
results = position_entered_manager.retrieve_voter_candidate_campaign_position(voter_id, candidate_campaign_id)
is_public_position = results['is_public_position']
if results['MultipleObjectsReturned']:
logger.warning("Multiple positions returned; TODO: delete all but one and take it over")
status = 'MultipleObjectsReturned-WORK_NEEDED'
if is_public_position:
voter_position_on_stage = PositionEntered()
else:
voter_position_on_stage = PositionForFriends()
results = {
'status': status,
'success': False,
'position_we_vote_id': '',
'position': voter_position_on_stage,
}
return results
voter_position_found = results['position_found']
voter_position_on_stage = results['position']
contest_measure_id = 0
return position_entered_manager.toggle_voter_position(voter_id, voter_position_found, voter_position_on_stage,
stance, candidate_campaign_id, contest_measure_id,
is_public_position)
def toggle_voter_position(self, voter_id, voter_position_found, voter_position_on_stage, stance,
candidate_campaign_id, contest_measure_id, is_public_position):
voter_position_on_stage_found = False
position_we_vote_id = ''
if voter_position_found:
# Update this position with new values
try:
voter_position_on_stage.stance = stance
if voter_position_on_stage.candidate_campaign_id:
if not positive_value_exists(voter_position_on_stage.candidate_campaign_we_vote_id):
# Heal the data, and fill in the candidate_campaign_we_vote_id
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_id(
candidate_campaign_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
voter_position_on_stage.candidate_campaign_we_vote_id = candidate_campaign.we_vote_id
voter_position_on_stage.google_civic_election_id = \
candidate_campaign.google_civic_election_id
voter_position_on_stage.ballot_item_display_name = candidate_campaign.candidate_name
if voter_position_on_stage.contest_measure_id:
if not positive_value_exists(voter_position_on_stage.contest_measure_we_vote_id):
# Heal the data, and fill in the contest_measure_we_vote_id
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_id(contest_measure_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
voter_position_on_stage.contest_measure_we_vote_id = contest_measure.we_vote_id
voter_position_on_stage.google_civic_election_id = contest_measure.google_civic_election_id
voter_position_on_stage.ballot_item_display_name = contest_measure.measure_title
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(voter_id)
voter_we_vote_id = ""
linked_organization_we_vote_id = ""
if results['voter_found']:
voter = results['voter']
voter_we_vote_id = voter.we_vote_id
linked_organization_we_vote_id = voter.linked_organization_we_vote_id
if not positive_value_exists(voter_position_on_stage.voter_we_vote_id):
# Heal the data: Make sure we have a voter_we_vote_id
voter_position_on_stage.voter_we_vote_id = voter_we_vote_id
if not positive_value_exists(voter_position_on_stage.speaker_display_name) \
or not positive_value_exists(voter_position_on_stage.organization_id) \
or not positive_value_exists(voter_position_on_stage.organization_we_vote_id):
# Heal the data
if positive_value_exists(linked_organization_we_vote_id):
if not positive_value_exists(voter_position_on_stage.organization_we_vote_id):
voter_position_on_stage.organization_we_vote_id = linked_organization_we_vote_id
# Look up the organization_id
organization_manager = OrganizationManager()
organization_results = organization_manager.retrieve_organization_from_we_vote_id(
linked_organization_we_vote_id)
if organization_results['organization_found']:
organization = organization_results['organization']
organization_id = organization.id
speaker_display_name = organization.organization_name
if not positive_value_exists(voter_position_on_stage.speaker_display_name):
voter_position_on_stage.speaker_display_name = speaker_display_name
if not positive_value_exists(voter_position_on_stage.organization_id):
voter_position_on_stage.organization_id = organization_id
voter_position_on_stage.save()
position_we_vote_id = voter_position_on_stage.we_vote_id
voter_position_on_stage_found = True
status = 'STANCE_UPDATED'
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
status = 'STANCE_COULD_NOT_BE_UPDATED'
else:
try:
# Create new
candidate_campaign_we_vote_id = ""
google_civic_election_id = 0
ballot_item_display_name = ""
if candidate_campaign_id:
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_id(
candidate_campaign_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
candidate_campaign_we_vote_id = candidate_campaign.we_vote_id
google_civic_election_id = candidate_campaign.google_civic_election_id
ballot_item_display_name = candidate_campaign.candidate_name
contest_measure_we_vote_id = ""
if contest_measure_id:
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_id(contest_measure_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
contest_measure_we_vote_id = contest_measure.we_vote_id
google_civic_election_id = contest_measure.google_civic_election_id
ballot_item_display_name = contest_measure.measure_title
# In order to show a position publicly we need to tie the position to either organization_we_vote_id,
# public_figure_we_vote_id or candidate_we_vote_id. For now (2016-8-17) we assume organization
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(voter_id)
organization_id = 0
organization_we_vote_id = ""
voter_we_vote_id = ""
speaker_display_name = ""
if results['voter_found']:
voter = results['voter']
voter_we_vote_id = voter.we_vote_id
organization_we_vote_id = voter.linked_organization_we_vote_id
if positive_value_exists(organization_we_vote_id):
# Look up the organization_id
organization_manager = OrganizationManager()
organization_results = organization_manager.retrieve_organization_from_we_vote_id(
voter.linked_organization_we_vote_id)
if organization_results['organization_found']:
organization = organization_results['organization']
organization_id = organization.id
speaker_display_name = organization.organization_name
if is_public_position:
voter_position_on_stage = PositionEntered(
voter_id=voter_id,
voter_we_vote_id=voter_we_vote_id,
candidate_campaign_id=candidate_campaign_id,
candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
contest_measure_id=contest_measure_id,
contest_measure_we_vote_id=contest_measure_we_vote_id,
stance=stance,
google_civic_election_id=google_civic_election_id,
organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
ballot_item_display_name=ballot_item_display_name,
speaker_display_name=speaker_display_name,
)
else:
voter_position_on_stage = PositionForFriends(
voter_id=voter_id,
voter_we_vote_id=voter_we_vote_id,
candidate_campaign_id=candidate_campaign_id,
candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
contest_measure_id=contest_measure_id,
contest_measure_we_vote_id=contest_measure_we_vote_id,
stance=stance,
google_civic_election_id=google_civic_election_id,
organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
ballot_item_display_name=ballot_item_display_name,
speaker_display_name=speaker_display_name,
)
voter_position_on_stage.save()
position_we_vote_id = voter_position_on_stage.we_vote_id
voter_position_on_stage_found = True
status = 'NEW_STANCE_SAVED'
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
status = 'NEW_STANCE_COULD_NOT_BE_SAVED'
results = {
'status': status,
'success': voter_position_on_stage_found,
'position_we_vote_id': position_we_vote_id,
'position': voter_position_on_stage,
}
return results
def toggle_on_voter_support_for_contest_measure(self, voter_id, contest_measure_id):
stance = SUPPORT
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_contest_measure(
voter_id, contest_measure_id, stance)
def toggle_off_voter_support_for_contest_measure(self, voter_id, contest_measure_id):
stance = NO_STANCE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_contest_measure(
voter_id, contest_measure_id, stance)
def toggle_on_voter_oppose_for_contest_measure(self, voter_id, contest_measure_id):
stance = OPPOSE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_contest_measure(
voter_id, contest_measure_id, stance)
def toggle_off_voter_oppose_for_contest_measure(self, voter_id, contest_measure_id):
stance = NO_STANCE
position_entered_manager = PositionEnteredManager()
return position_entered_manager.toggle_on_voter_position_for_contest_measure(
voter_id, contest_measure_id, stance)
def toggle_on_voter_position_for_contest_measure(self, voter_id, contest_measure_id, stance):
# Does a position from this voter already exist?
position_entered_manager = PositionEnteredManager()
results = position_entered_manager.retrieve_voter_contest_measure_position(voter_id, contest_measure_id)
is_public_position = results['is_public_position']
if results['MultipleObjectsReturned']:
logger.warning("Multiple positions returned; TODO: delete all but one and take it over")
status = 'MultipleObjectsReturned-WORK_NEEDED'
if is_public_position:
voter_position_on_stage = PositionEntered()
else:
voter_position_on_stage = PositionForFriends()
results = {
'status': status,
'success': False,
'position_we_vote_id': '',
'position': voter_position_on_stage,
}
return results
voter_position_found = results['position_found']
voter_position_on_stage = results['position']
candidate_campaign_id = 0
return position_entered_manager.toggle_voter_position(voter_id, voter_position_found, voter_position_on_stage,
stance, candidate_campaign_id, contest_measure_id,
is_public_position)
def update_or_create_position_comment(self, position_we_vote_id, voter_id, voter_we_vote_id,
office_we_vote_id, candidate_we_vote_id, measure_we_vote_id,
statement_text, statement_html):
voter_position_found = False
is_public_position = False
# Set this in case of error
voter_position_on_stage = PositionForFriends()
if positive_value_exists(position_we_vote_id):
# TODO: Retrieve the position by its we_vote_id (not yet implemented)
pass
else:
if not positive_value_exists(voter_id):
voter_id = fetch_voter_id_from_voter_we_vote_id(voter_we_vote_id)
if positive_value_exists(candidate_we_vote_id):
results = self.retrieve_voter_candidate_campaign_position_with_we_vote_id(
voter_id, candidate_we_vote_id)
if results['position_found']:
voter_position_found = True
voter_position_on_stage = results['position']
is_public_position = results['is_public_position']
elif positive_value_exists(office_we_vote_id):
# TODO
pass
elif positive_value_exists(measure_we_vote_id):
results = self.retrieve_voter_contest_measure_position_with_we_vote_id(
voter_id, measure_we_vote_id)
if results['position_found']:
voter_position_found = True
voter_position_on_stage = results['position']
is_public_position = results['is_public_position']
voter_position_on_stage_found = False
position_we_vote_id = ''
if voter_position_found:
problem_with_duplicate = False
success = False
status = "UPDATE_OR_CREATE_POSITION_COMMENT-CHECKING_FOR_DUPLICATE"
try:
# Check for duplicate in other table
position_we_vote_id = ""
organization_id = 0
organization_we_vote_id = ""
contest_office_id = 0
candidate_campaign_id = 0
contest_measure_id = 0
retrieve_position_for_friends = is_public_position  # Look in the opposite table for a duplicate
voter_we_vote_id = ""
duplicate_results = self.retrieve_position(position_we_vote_id,
organization_id, organization_we_vote_id, voter_id,
contest_office_id, candidate_campaign_id, contest_measure_id,
retrieve_position_for_friends,
voter_we_vote_id,
office_we_vote_id, candidate_we_vote_id, measure_we_vote_id)
if duplicate_results['position_found']:
problem_with_duplicate = True
success = False
status = 'UPDATE_OR_CREATE_POSITION_COMMENT-EXISTING_POSITION_CHECK_FAILED'
except Exception as e:
problem_with_duplicate = True
success = False
status = 'UPDATE_OR_CREATE_POSITION_COMMENT-EXISTING_POSITION_CHECK_FAILED-EXCEPTION'
if problem_with_duplicate:
results = {
'success': success,
'status': status,
'position_we_vote_id': position_we_vote_id,
'position': voter_position_on_stage,
'position_found': voter_position_found,
'is_public_position': is_public_position
}
return results
# Update this position with new values
try:
voter_position_on_stage.statement_text = statement_text
# Make sure a google_civic_election_id is stored
if not positive_value_exists(voter_position_on_stage.google_civic_election_id):
# We want to retrieve the google_civic_election_id from the ballot item object
if positive_value_exists(voter_position_on_stage.candidate_campaign_we_vote_id):
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(
voter_position_on_stage.candidate_campaign_we_vote_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
voter_position_on_stage.google_civic_election_id = \
candidate_campaign.google_civic_election_id
elif positive_value_exists(voter_position_on_stage.contest_measure_we_vote_id):
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(
voter_position_on_stage.contest_measure_we_vote_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
voter_position_on_stage.google_civic_election_id = contest_measure.google_civic_election_id
elif positive_value_exists(voter_position_on_stage.contest_office_we_vote_id):
contest_office_manager = ContestOfficeManager()
results = contest_office_manager.retrieve_contest_office_from_we_vote_id(
voter_position_on_stage.contest_office_we_vote_id)
if results['contest_office_found']:
contest_office = results['contest_office']
voter_position_on_stage.google_civic_election_id = contest_office.google_civic_election_id
if voter_position_on_stage.candidate_campaign_we_vote_id:
if not positive_value_exists(voter_position_on_stage.candidate_campaign_id):
# Heal the data, and fill in the candidate_campaign_id
candidate_campaign_manager = CandidateCampaignManager()
voter_position_on_stage.candidate_campaign_id = \
candidate_campaign_manager.fetch_candidate_campaign_id_from_we_vote_id(
voter_position_on_stage.candidate_campaign_we_vote_id)
elif voter_position_on_stage.contest_measure_we_vote_id:
if not positive_value_exists(voter_position_on_stage.contest_measure_id):
# Heal the data, and fill in the contest_measure_id
contest_measure_manager = ContestMeasureManager()
voter_position_on_stage.contest_measure_id = \
contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(
voter_position_on_stage.contest_measure_we_vote_id)
if not positive_value_exists(voter_position_on_stage.voter_we_vote_id):
# Heal the data: Make sure we have a voter_we_vote_id
voter_position_on_stage.voter_we_vote_id = fetch_voter_we_vote_id_from_voter_id(voter_id)
voter_position_on_stage.save()
position_we_vote_id = voter_position_on_stage.we_vote_id
voter_position_on_stage_found = True
status = 'POSITION_COMMENT_UPDATED'
except Exception as e:
status = 'POSITION_COMMENT_COULD_NOT_BE_UPDATED'
else:
try:
# Create new
candidate_campaign_id = None
google_civic_election_id = 0
ballot_item_display_name = ""
if candidate_we_vote_id:
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(
candidate_we_vote_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
candidate_campaign_id = candidate_campaign.id
google_civic_election_id = candidate_campaign.google_civic_election_id
ballot_item_display_name = candidate_campaign.candidate_name
contest_measure_id = None
if measure_we_vote_id:
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(measure_we_vote_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
contest_measure_id = contest_measure.id
google_civic_election_id = contest_measure.google_civic_election_id
ballot_item_display_name = contest_measure.measure_title
# In order to show a position publicly we need to tie the position to either organization_we_vote_id,
# public_figure_we_vote_id or candidate_we_vote_id. For now (2016-8-17) we assume organization
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(voter_id)
organization_id = 0
organization_we_vote_id = ""
voter_we_vote_id = ""
speaker_display_name = ""  # Initialize so the create below cannot hit a NameError
if results['voter_found']:
voter = results['voter']
voter_we_vote_id = voter.we_vote_id
organization_we_vote_id = voter.linked_organization_we_vote_id
if positive_value_exists(organization_we_vote_id):
# Look up the organization_id
organization_manager = OrganizationManager()
organization_results = organization_manager.retrieve_organization_from_we_vote_id(
voter.linked_organization_we_vote_id)
if organization_results['organization_found']:
organization = organization_results['organization']
organization_id = organization.id
speaker_display_name = organization.organization_name
# Always default to Friends only
voter_position_on_stage = PositionForFriends(
voter_id=voter_id,
voter_we_vote_id=voter_we_vote_id,
candidate_campaign_id=candidate_campaign_id,
candidate_campaign_we_vote_id=candidate_we_vote_id,
contest_measure_id=contest_measure_id,
contest_measure_we_vote_id=measure_we_vote_id,
stance=NO_STANCE,
google_civic_election_id=google_civic_election_id,
organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
statement_text=statement_text,
ballot_item_display_name=ballot_item_display_name,
speaker_display_name=speaker_display_name,
)
voter_position_on_stage.save()
position_we_vote_id = voter_position_on_stage.we_vote_id
voter_position_on_stage_found = True
is_public_position = False
status = 'NEW_POSITION_COMMENT_SAVED'
except Exception as e:
status = 'NEW_POSITION_COMMENT_COULD_NOT_BE_SAVED'
results = {
'status': status,
'success': voter_position_on_stage_found,
'position_we_vote_id': position_we_vote_id,
'position': voter_position_on_stage,
'is_public_position': is_public_position
}
return results
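# Illustrative sketch: update_or_create_position_comment saves comment text on a
# voter's existing position, or creates a new friends-only position that carries
# only the comment (stance NO_STANCE). The we_vote_id below is hypothetical:
#
#   comment_results = position_entered_manager.update_or_create_position_comment(
#       position_we_vote_id="", voter_id=123, voter_we_vote_id="",
#       office_we_vote_id="", candidate_we_vote_id="wv01cand1", measure_we_vote_id="",
#       statement_text="I like this candidate's record.", statement_html="")
#   comment_results['is_public_position']  # False when a new comment position is created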
# We rely on this unique identifier: position_we_vote_id
# Pass in a value if we want it saved. Pass in "False" if we want to leave it the same.
def update_or_create_position(
self, position_we_vote_id,
organization_we_vote_id=False,
public_figure_we_vote_id=False,
voter_we_vote_id=False,
google_civic_election_id=False,
state_code=False,
ballot_item_display_name=False,
office_we_vote_id=False,
candidate_we_vote_id=False,
measure_we_vote_id=False,
stance=False,
set_as_public_position=False,
statement_text=False,
statement_html=False,
more_info_url=False,
vote_smart_time_span=False,
vote_smart_rating_id=False,
vote_smart_rating=False,
vote_smart_rating_name=False):
"""
Either update or create a position entry.
"""
exception_does_not_exist = False
exception_multiple_object_returned = False
failed_saving_existing_position = False
position_on_stage_found = False
new_position_created = False
too_many_unique_actor_variables_received = False
no_unique_actor_variables_received = False
too_many_unique_ballot_item_variables_received = False
no_unique_ballot_item_variables_received = False
if set_as_public_position:
position_on_stage_starter = PositionEntered
position_on_stage = PositionEntered()
else:
position_on_stage_starter = PositionForFriends
position_on_stage = PositionForFriends()
status = "ENTERING_UPDATE_OR_CREATE_POSITION"
position_entered_manager = PositionEnteredManager()
# In order of authority
# 1) position_id exists? Find it with position_id or fail (REMOVED)
# 2) we_vote_id exists? Find it with we_vote_id or fail
# 3-5) organization_we_vote_id related position?
# 6-8) public_figure_we_vote_id related position?
# 9-11) voter_we_vote_id related position?
success = False
if positive_value_exists(position_we_vote_id):
# If here, we know we are updating
# 1) position_id exists? Find it with position_id or fail REMOVED
# 2) we_vote_id exists? Find it with we_vote_id or fail
# The enclosing branch already verified position_we_vote_id, so retrieve directly
position_results = position_entered_manager.retrieve_position_from_we_vote_id(position_we_vote_id)
found_with_we_vote_id = True
if position_results['success']:
position_on_stage = position_results['position']
position_on_stage_found = True
if organization_we_vote_id:
position_on_stage.organization_we_vote_id = organization_we_vote_id
# Lookup organization_id based on organization_we_vote_id and update
organization_manager = OrganizationManager()
position_on_stage.organization_id = \
organization_manager.fetch_organization_id(organization_we_vote_id)
if google_civic_election_id:
position_on_stage.google_civic_election_id = google_civic_election_id
if state_code:
position_on_stage.state_code = state_code
if ballot_item_display_name:
position_on_stage.ballot_item_display_name = ballot_item_display_name
if office_we_vote_id:
position_on_stage.contest_office_we_vote_id = office_we_vote_id
# Lookup contest_office_id based on office_we_vote_id and update
contest_office_manager = ContestOfficeManager()
position_on_stage.contest_office_id = \
contest_office_manager.fetch_contest_office_id_from_we_vote_id(office_we_vote_id)
if candidate_we_vote_id:
position_on_stage.candidate_campaign_we_vote_id = candidate_we_vote_id
# Lookup candidate_campaign_id based on candidate_campaign_we_vote_id and update
candidate_campaign_manager = CandidateCampaignManager()
position_on_stage.candidate_campaign_id = \
candidate_campaign_manager.fetch_candidate_campaign_id_from_we_vote_id(candidate_we_vote_id)
if measure_we_vote_id:
position_on_stage.contest_measure_we_vote_id = measure_we_vote_id
# Lookup contest_measure_id based on contest_measure_we_vote_id and update
contest_measure_manager = ContestMeasureManager()
position_on_stage.contest_measure_id = \
contest_measure_manager.fetch_contest_measure_id_from_we_vote_id(measure_we_vote_id)
# if positive_value_exists(stance):
if stance:
# TODO Verify that "stance" contains a legal value
position_on_stage.stance = stance
if statement_text:
position_on_stage.statement_text = statement_text
if statement_html:
position_on_stage.statement_html = statement_html
if more_info_url:
position_on_stage.more_info_url = more_info_url
if vote_smart_time_span:
position_on_stage.vote_smart_time_span = vote_smart_time_span
if vote_smart_rating_id:
position_on_stage.vote_smart_rating_id = vote_smart_rating_id
if vote_smart_rating:
position_on_stage.vote_smart_rating = vote_smart_rating
if vote_smart_rating_name:
position_on_stage.vote_smart_rating_name = vote_smart_rating_name
# Save if at least one of the above values was passed in (anything not False)
if organization_we_vote_id or google_civic_election_id or ballot_item_display_name or office_we_vote_id \
or candidate_we_vote_id or measure_we_vote_id or stance or statement_text \
or statement_html or more_info_url \
or vote_smart_time_span or vote_smart_rating_id or vote_smart_rating or vote_smart_rating_name:
position_on_stage.save()
success = True
if found_with_we_vote_id:
status = "POSITION_SAVED_WITH_POSITION_WE_VOTE_ID"
else:
status = "POSITION_CHANGES_SAVED"
else:
success = True
if found_with_we_vote_id:
status = "NO_POSITION_CHANGES_SAVED_WITH_POSITION_WE_VOTE_ID"
else:
status = "NO_POSITION_CHANGES_SAVED"
else:
status = "POSITION_COULD_NOT_BE_FOUND_WITH_POSITION_ID_OR_WE_VOTE_ID"
# else for this: if positive_value_exists(position_we_vote_id):
else:
# We also want to retrieve a position with the following sets of variables:
# 3) organization_we_vote_id, google_civic_election_id, candidate_we_vote_id: DONE
# 4) organization_we_vote_id, google_civic_election_id, measure_we_vote_id: DONE
# 5) organization_we_vote_id, google_civic_election_id, office_we_vote_id: DONE
# 6) TODO public_figure_we_vote_id, google_civic_election_id, office_we_vote_id
# 7) TODO public_figure_we_vote_id, google_civic_election_id, candidate_we_vote_id
# 8) TODO public_figure_we_vote_id, google_civic_election_id, measure_we_vote_id
# NOTE: Voters storing a public version of their voter guides store it as a public_figure_we_vote_id
# 9) voter_we_vote_id, google_civic_election_id, office_we_vote_id
# 10) voter_we_vote_id, google_civic_election_id, candidate_we_vote_id
# 11) voter_we_vote_id, google_civic_election_id, measure_we_vote_id
found_with_status = ''
# Make sure that too many ballot item identifier variables weren't passed in
number_of_unique_ballot_item_identifiers = 0
if positive_value_exists(candidate_we_vote_id):
number_of_unique_ballot_item_identifiers += 1
if positive_value_exists(measure_we_vote_id):
number_of_unique_ballot_item_identifiers += 1
if positive_value_exists(office_we_vote_id):
number_of_unique_ballot_item_identifiers += 1
if number_of_unique_ballot_item_identifiers > 1:
too_many_unique_ballot_item_variables_received = True
status = "FAILED-TOO_MANY_UNIQUE_BALLOT_ITEM_VARIABLES"
success = False
elif number_of_unique_ballot_item_identifiers == 0:
no_unique_ballot_item_variables_received = True
status = "FAILED-NO_UNIQUE_BALLOT_ITEM_VARIABLES_RECEIVED"
success = False
# Make sure that too many "actor" identifier variables weren't passed in
number_of_unique_actor_identifiers = 0
if positive_value_exists(organization_we_vote_id):
number_of_unique_actor_identifiers += 1
if positive_value_exists(public_figure_we_vote_id):
number_of_unique_actor_identifiers += 1
if positive_value_exists(voter_we_vote_id):
number_of_unique_actor_identifiers += 1
if number_of_unique_actor_identifiers > 1:
too_many_unique_actor_variables_received = True
status = "FAILED-TOO_MANY_UNIQUE_ACTOR_VARIABLES"
success = False
elif number_of_unique_actor_identifiers == 0:
no_unique_actor_variables_received = True
status = "FAILED-NO_UNIQUE_ACTOR_VARIABLES_RECEIVED"
success = False
# Only proceed if the correct number of unique identifiers was received
if not too_many_unique_ballot_item_variables_received and \
not too_many_unique_actor_variables_received and \
not no_unique_actor_variables_received and \
not no_unique_ballot_item_variables_received:
# 3-5: Organization-related cases
# 3) candidate_we_vote_id + organization_we_vote_id exists? Try to find it. If not, go to step 4
if positive_value_exists(candidate_we_vote_id) and \
positive_value_exists(organization_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
organization_id = 0
# organization_we_vote_id = ""
voter_id = 0
office_id = 0
voter_we_vote_id = ""
office_we_vote_id = ""
candidate_id = 0
# candidate_we_vote_id = ""
measure_id = 0
measure_we_vote_id = ""
# google_civic_election_id = 0
results = position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id,
voter_id,
office_id, candidate_id,
measure_id,
voter_we_vote_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
google_civic_election_id)
if results['position_found']:
position_on_stage = results['position']
# position_on_stage = position_on_stage_starter.objects.get(
# candidate_campaign_we_vote_id=candidate_we_vote_id,
# organization_we_vote_id=organization_we_vote_id,
# google_civic_election_id=google_civic_election_id
# )
position_on_stage_found = True
found_with_status = "FOUND_WITH_CANDIDATE_AND_ORGANIZATION_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_CANDIDATE_AND_ORGANIZATION_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this candidate_we_vote_id wasn't found
pass
# If there wasn't a google_civic_election_id, look for vote_smart_time_span
elif positive_value_exists(candidate_we_vote_id) and \
positive_value_exists(organization_we_vote_id) and \
positive_value_exists(vote_smart_time_span):
try:
organization_id = 0
# organization_we_vote_id = ""
voter_id = 0
office_id = 0
voter_we_vote_id = ""
office_we_vote_id = ""
candidate_id = 0
# candidate_we_vote_id = ""
measure_id = 0
measure_we_vote_id = ""
google_civic_election_id = 0
# vote_smart_time_span = ""
results = position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id,
voter_id,
office_id, candidate_id,
measure_id,
voter_we_vote_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
google_civic_election_id,
vote_smart_time_span,
)
if results['position_found']:
position_on_stage = results['position']
# position_on_stage = position_on_stage_starter.objects.get(
# candidate_campaign_we_vote_id=candidate_we_vote_id,
# organization_we_vote_id=organization_we_vote_id,
# vote_smart_time_span=vote_smart_time_span
# )
position_on_stage_found = True
found_with_status = "FOUND_WITH_CANDIDATE_AND_ORGANIZATION_WE_VOTE_ID_WITH_TIME_SPAN"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_CANDIDATE_AND_ORGANIZATION_WE_VOTE_ID_WITH_TIME_SPAN"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this candidate_we_vote_id wasn't found
pass
# 4) measure_we_vote_id + organization_we_vote_id exists? Try to find it. If not, go to step 5
if positive_value_exists(measure_we_vote_id) and \
positive_value_exists(organization_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
organization_id = 0
# organization_we_vote_id = ""
voter_id = 0
office_id = 0
voter_we_vote_id = ""
office_we_vote_id = ""
candidate_id = 0
candidate_we_vote_id = ""
measure_id = 0
# measure_we_vote_id = ""
# google_civic_election_id = 0
vote_smart_time_span = ""
results = position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id,
voter_id,
office_id, candidate_id,
measure_id,
voter_we_vote_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
google_civic_election_id,
vote_smart_time_span,
)
if results['position_found']:
position_on_stage = results['position']
# position_on_stage = position_on_stage_starter.objects.get(
# contest_measure_we_vote_id=measure_we_vote_id,
# organization_we_vote_id=organization_we_vote_id,
# google_civic_election_id=google_civic_election_id
# )
position_on_stage_found = True
found_with_status = "FOUND_WITH_MEASURE_AND_ORGANIZATION_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_MEASURE_AND_ORGANIZATION_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this measure_we_vote_id wasn't found
pass
# 5) office_we_vote_id + organization_we_vote_id exists? Try to find it. If not, go to step 6
if positive_value_exists(office_we_vote_id) and \
positive_value_exists(organization_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
organization_id = 0
# organization_we_vote_id = ""
voter_id = 0
office_id = 0
voter_we_vote_id = ""
# office_we_vote_id = ""
candidate_id = 0
candidate_we_vote_id = ""
measure_id = 0
measure_we_vote_id = ""
# google_civic_election_id = 0
vote_smart_time_span = ""
results = position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id,
voter_id,
office_id, candidate_id,
measure_id,
voter_we_vote_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
google_civic_election_id,
vote_smart_time_span,
)
if results['position_found']:
position_on_stage = results['position']
# position_on_stage = position_on_stage_starter.objects.get(
# contest_office_we_vote_id=office_we_vote_id,
# organization_we_vote_id=organization_we_vote_id,
# google_civic_election_id=google_civic_election_id
# )
position_on_stage_found = True
found_with_status = "FOUND_WITH_OFFICE_AND_ORGANIZATION_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_OFFICE_AND_ORGANIZATION_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this office_we_vote_id wasn't found
pass
# TODO Test public_figure (6-8) and voter (9-11) related cases
# 6-8: Public-Figure-related cases
# 6) candidate_we_vote_id + public_figure_we_vote_id exists? Try to find it. If not, go to step 7
if positive_value_exists(candidate_we_vote_id) and \
positive_value_exists(public_figure_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
organization_id = 0
organization_we_vote_id = ""
voter_id = 0
office_id = 0
voter_we_vote_id = ""
office_we_vote_id = ""
candidate_id = 0
# candidate_we_vote_id = ""
measure_id = 0
measure_we_vote_id = ""
# google_civic_election_id = 0
vote_smart_time_span = ""
# TODO public_figure code needs to be added for this to work
results = position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id,
voter_id,
office_id, candidate_id,
measure_id,
voter_we_vote_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
google_civic_election_id,
vote_smart_time_span,
)
if results['position_found']:
position_on_stage = results['position']
# position_on_stage = position_on_stage_starter.objects.get(
# candidate_campaign_we_vote_id=candidate_we_vote_id,
# public_figure_we_vote_id=public_figure_we_vote_id,
# google_civic_election_id=google_civic_election_id
# )
position_on_stage_found = False # TODO Update when working
found_with_status = "FOUND_WITH_CANDIDATE_AND_PUBLIC_FIGURE_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_CANDIDATE_AND_PUBLIC_FIGURE_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this candidate_we_vote_id wasn't found
pass
# 7) measure_we_vote_id + public_figure_we_vote_id exists? Try to find it. If not, go to step 8
if positive_value_exists(measure_we_vote_id) and \
positive_value_exists(public_figure_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
# TODO DALE replace with retrieve_position_table_unknown
position_on_stage = position_on_stage_starter.objects.get(
contest_measure_we_vote_id=measure_we_vote_id,
public_figure_we_vote_id=public_figure_we_vote_id,
google_civic_election_id=google_civic_election_id
)
position_on_stage_found = False # TODO Update when working
found_with_status = "FOUND_WITH_MEASURE_AND_PUBLIC_FIGURE_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_MEASURE_AND_PUBLIC_FIGURE_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this measure_we_vote_id wasn't found
pass
# 8) office_we_vote_id + public_figure_we_vote_id exists? Try to find it. If not, go to step 9
if positive_value_exists(office_we_vote_id) and \
positive_value_exists(public_figure_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
# TODO DALE replace with retrieve_position_table_unknown
position_on_stage = position_on_stage_starter.objects.get(
contest_office_we_vote_id=office_we_vote_id,
public_figure_we_vote_id=public_figure_we_vote_id,
google_civic_election_id=google_civic_election_id
)
position_on_stage_found = False # TODO Update when public_figure working
found_with_status = "FOUND_WITH_OFFICE_AND_VOTER_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_OFFICE_AND_VOTER_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this office_we_vote_id wasn't found
pass
# 9-11: Voter-related cases
# 9) candidate_we_vote_id + voter_we_vote_id exists? Try to find it. If not, go to step 10
if positive_value_exists(candidate_we_vote_id) and \
positive_value_exists(voter_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
organization_id = 0
organization_we_vote_id = ""
voter_id = 0
office_id = 0
# voter_we_vote_id = ""
office_we_vote_id = ""
candidate_id = 0
# candidate_we_vote_id = ""
measure_id = 0
measure_we_vote_id = ""
# google_civic_election_id = 0
vote_smart_time_span = ""
results = position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id,
voter_id,
office_id, candidate_id,
measure_id,
voter_we_vote_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
google_civic_election_id,
vote_smart_time_span,
)
if results['position_found']:
position_on_stage = results['position']
# position_on_stage = position_on_stage_starter.objects.get(
# candidate_campaign_we_vote_id=candidate_we_vote_id,
# voter_we_vote_id=voter_we_vote_id,
# google_civic_election_id=google_civic_election_id
# )
position_on_stage_found = True
found_with_status = "FOUND_WITH_CANDIDATE_AND_VOTER_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_CANDIDATE_AND_VOTER_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this candidate_we_vote_id wasn't found
pass
# 10) measure_we_vote_id + voter_we_vote_id exists? Try to find it. If not, go to step 11
if positive_value_exists(measure_we_vote_id) and \
positive_value_exists(voter_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
organization_id = 0
organization_we_vote_id = ""
voter_id = 0
office_id = 0
# voter_we_vote_id = ""
office_we_vote_id = ""
candidate_id = 0
candidate_we_vote_id = ""
measure_id = 0
# measure_we_vote_id = ""
# google_civic_election_id = 0
vote_smart_time_span = ""
results = position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id,
voter_id,
office_id, candidate_id,
measure_id,
voter_we_vote_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
google_civic_election_id,
vote_smart_time_span,
)
if results['position_found']:
position_on_stage = results['position']
# position_on_stage = position_on_stage_starter.objects.get(
# contest_measure_we_vote_id=measure_we_vote_id,
# voter_we_vote_id=voter_we_vote_id,
# google_civic_election_id=google_civic_election_id
# )
position_on_stage_found = True
found_with_status = "FOUND_WITH_MEASURE_AND_VOTER_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_MEASURE_AND_VOTER_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this measure_we_vote_id wasn't found
pass
# 11) office_we_vote_id + voter_we_vote_id exists? Try to find it.
if positive_value_exists(office_we_vote_id) and \
positive_value_exists(voter_we_vote_id) and \
positive_value_exists(google_civic_election_id):
try:
organization_id = 0
organization_we_vote_id = ""
voter_id = 0
office_id = 0
# voter_we_vote_id = ""
# office_we_vote_id = ""
candidate_id = 0
candidate_we_vote_id = ""
measure_id = 0
measure_we_vote_id = ""
# google_civic_election_id = 0
vote_smart_time_span = ""
results = position_entered_manager.retrieve_position_table_unknown(
position_we_vote_id, organization_id, organization_we_vote_id,
voter_id,
office_id, candidate_id,
measure_id,
voter_we_vote_id,
office_we_vote_id,
candidate_we_vote_id,
measure_we_vote_id,
google_civic_election_id,
vote_smart_time_span,
)
if results['position_found']:
position_on_stage = results['position']
# position_on_stage = position_on_stage_starter.objects.get(
# contest_office_we_vote_id=office_we_vote_id,
# voter_we_vote_id=voter_we_vote_id,
# google_civic_election_id=google_civic_election_id
# )
position_on_stage_found = True
found_with_status = "FOUND_WITH_OFFICE_AND_VOTER_WE_VOTE_ID"
except MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger)
exception_multiple_object_returned = True
status = "FAILED-MULTIPLE_FOUND_WITH_OFFICE_AND_VOTER_WE_VOTE_ID"
except ObjectDoesNotExist as e:
# Not a problem -- a position matching this office wasn't found
pass
# Save values entered in steps 3-11
if position_on_stage_found:
try:
if ballot_item_display_name or stance or statement_text or statement_html or more_info_url:
if ballot_item_display_name:
position_on_stage.ballot_item_display_name = ballot_item_display_name
if stance:
position_on_stage.stance = stance
if statement_text:
position_on_stage.statement_text = statement_text
if statement_html:
position_on_stage.statement_html = statement_html
if more_info_url:
position_on_stage.more_info_url = more_info_url
position_on_stage.save()
success = True
status = found_with_status + " SAVED"
else:
success = True
status = found_with_status + " NO_CHANGES_SAVED"
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
failed_saving_existing_position = True
if not position_on_stage_found \
and not exception_multiple_object_returned \
and not failed_saving_existing_position \
and not too_many_unique_ballot_item_variables_received and not too_many_unique_actor_variables_received:
try:
# If here, create new position
# Callers pass "False" for any value they do not want to update. When creating
# a new entry we need to convert those leftover "False" values to None (or a
# sensible default) so the literal False is not stored.
speaker_display_name = ""
candidate_campaign_id = None
if candidate_we_vote_id:
candidate_campaign_manager = CandidateCampaignManager()
results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(
candidate_we_vote_id)
if results['candidate_campaign_found']:
candidate_campaign = results['candidate_campaign']
candidate_campaign_id = candidate_campaign.id
google_civic_election_id = candidate_campaign.google_civic_election_id
ballot_item_display_name = candidate_campaign.candidate_name
else:
# We don't need to ever look up the candidate_we_vote_id from the candidate_campaign_id
candidate_we_vote_id = None
contest_measure_id = None
if measure_we_vote_id:
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure_from_we_vote_id(measure_we_vote_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
contest_measure_id = contest_measure.id
google_civic_election_id = contest_measure.google_civic_election_id
ballot_item_display_name = contest_measure.measure_title
else:
# We don't need to ever look up the measure_we_vote_id from the contest_measure_id
measure_we_vote_id = None
contest_office_id = None
if office_we_vote_id:
contest_office_manager = ContestOfficeManager()
results = contest_office_manager.retrieve_contest_office_from_we_vote_id(office_we_vote_id)
if results['contest_office_found']:
contest_office = results['contest_office']
contest_office_id = contest_office.id
google_civic_election_id = contest_office.google_civic_election_id
ballot_item_display_name = contest_office.office_name
else:
# We don't need to ever look up the office_we_vote_id from the contest_office_id
office_we_vote_id = None
if google_civic_election_id is False:
google_civic_election_id = None
if state_code is False:
state_code = None
if ballot_item_display_name is False:
ballot_item_display_name = None
if stance not in (SUPPORT, NO_STANCE, INFORMATION_ONLY, STILL_DECIDING, OPPOSE, PERCENT_RATING):
stance = NO_STANCE
if statement_text is False:
statement_text = None
if statement_html is False:
statement_html = None
if more_info_url is False:
more_info_url = None
if vote_smart_time_span is False:
vote_smart_time_span = None
if vote_smart_rating_id is False:
vote_smart_rating_id = None
if vote_smart_rating is False:
vote_smart_rating = None
if vote_smart_rating_name is False:
vote_smart_rating_name = None
# In order to show a position publicly we need to tie the position to either organization_we_vote_id,
# public_figure_we_vote_id or candidate_we_vote_id. For now (2016-8-17) we assume organization
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_we_vote_id(voter_we_vote_id)
organization_id = 0
organization_we_vote_id = ""
voter_id = 0
if results['voter_found']:
voter = results['voter']
voter_id = voter.id
organization_we_vote_id = voter.linked_organization_we_vote_id
if positive_value_exists(organization_we_vote_id):
# Look up the organization_id
organization_manager = OrganizationManager()
organization_results = organization_manager.retrieve_organization_from_we_vote_id(
voter.linked_organization_we_vote_id)
if organization_results['organization_found']:
organization = organization_results['organization']
organization_id = organization.id
speaker_display_name = organization.organization_name
position_on_stage = position_on_stage_starter.objects.create(
organization_we_vote_id=organization_we_vote_id,
organization_id=organization_id,
voter_we_vote_id=voter_we_vote_id,
voter_id=voter_id,
google_civic_election_id=google_civic_election_id,
state_code=state_code,
ballot_item_display_name=ballot_item_display_name,
speaker_display_name=speaker_display_name,
contest_office_we_vote_id=office_we_vote_id,
contest_office_id=contest_office_id,
candidate_campaign_we_vote_id=candidate_we_vote_id,
candidate_campaign_id=candidate_campaign_id,
contest_measure_we_vote_id=measure_we_vote_id,
contest_measure_id=contest_measure_id,
stance=stance,
statement_text=statement_text,
statement_html=statement_html,
more_info_url=more_info_url,
vote_smart_time_span=vote_smart_time_span,
vote_smart_rating_id=vote_smart_rating_id,
vote_smart_rating=vote_smart_rating,
vote_smart_rating_name=vote_smart_rating_name
)
status = "CREATE_POSITION_SUCCESSFUL"
success = True
new_position_created = True
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
success = False
status = "NEW_POSITION_COULD_NOT_BE_CREATED"
if set_as_public_position:
position_on_stage = PositionEntered()
else:
position_on_stage = PositionForFriends()
results = {
'success': success,
'status': status,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
'position': position_on_stage,
'new_position_created': new_position_created,
}
return results
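# Illustrative sketch: updating an existing position by its we_vote_id. Any
# argument left at its default of False is left unchanged on the stored
# position; the we_vote_id below is hypothetical:
#
#   update_results = position_entered_manager.update_or_create_position(
#       position_we_vote_id="wv01pos1",
#       stance=SUPPORT,
#       statement_text="Updated statement",
#       more_info_url="https://example.com/why")
#   update_results['new_position_created']  # False when an existing row was updated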
def refresh_cached_position_info(self, position_object):
"""
The position tables cache information from other tables. This function reaches out to the source tables
and copies over the latest information to the position tables.
:param position_object: a PositionEntered or PositionForFriends instance whose cached fields should be refreshed
:return:
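
Example (illustrative): call this after the source organization, voter, or
candidate record changes so the denormalized speaker_* and ballot_item_*
fields are re-copied:
    position_entered_manager.refresh_cached_position_info(position_object)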
"""
position_change = False
# Start with "speaker" information (Organization, Voter, or Public Figure)
if positive_value_exists(position_object.organization_we_vote_id):
if not positive_value_exists(position_object.speaker_display_name) \
or not positive_value_exists(position_object.speaker_image_url_https) \
or not positive_value_exists(position_object.speaker_twitter_handle):
try:
# We need to look in the organization table for speaker_display_name & speaker_image_url_https
organization_manager = OrganizationManager()
organization_id = 0
results = organization_manager.retrieve_organization(organization_id,
position_object.organization_we_vote_id)
if results['organization_found']:
organization = results['organization']
if not positive_value_exists(position_object.speaker_display_name):
# speaker_display_name is missing so look it up from source
position_object.speaker_display_name = organization.organization_name
position_change = True
if not positive_value_exists(position_object.speaker_image_url_https):
# speaker_image_url_https is missing so look it up from source
position_object.speaker_image_url_https = organization.organization_photo_url()
position_change = True
if not positive_value_exists(position_object.speaker_twitter_handle):
# speaker_twitter_handle is missing so look it up from source
position_object.speaker_twitter_handle = organization.organization_twitter_handle
position_change = True
except Exception as e:
# best-effort refresh: if the organization lookup fails, leave cached values as-is
pass
elif positive_value_exists(position_object.voter_id):
if not positive_value_exists(position_object.speaker_display_name) \
or not positive_value_exists(position_object.voter_we_vote_id) \
or not positive_value_exists(position_object.speaker_image_url_https) \
or not positive_value_exists(position_object.speaker_twitter_handle):
try:
# We need to look in the voter table for speaker_display_name
voter_manager = VoterManager()
results = voter_manager.retrieve_voter_by_id(position_object.voter_id)
if results['voter_found']:
voter = results['voter']
if not positive_value_exists(position_object.speaker_display_name):
# speaker_display_name is missing so look it up from source
position_object.speaker_display_name = voter.get_full_name()
position_change = True
if not positive_value_exists(position_object.voter_we_vote_id):
# voter_we_vote_id is missing so look it up from source
position_object.voter_we_vote_id = voter.we_vote_id
position_change = True
if not positive_value_exists(position_object.speaker_image_url_https):
# speaker_image_url_https is missing so look it up from source
position_object.speaker_image_url_https = voter.voter_photo_url()
position_change = True
if not positive_value_exists(position_object.speaker_twitter_handle):
# speaker_twitter_handle is missing so look it up from source
position_object.speaker_twitter_handle = voter.twitter_screen_name
position_change = True
except Exception as e:
# best-effort refresh: if the voter lookup fails, leave cached values as-is
pass
elif positive_value_exists(position_object.public_figure_we_vote_id):
pass
# Now move onto "ballot_item" information
# Candidate
check_for_missing_office_data = False
candidate_campaign_manager = CandidateCampaignManager()
contest_office_id = 0
contest_office_we_vote_id = ""
if positive_value_exists(position_object.candidate_campaign_id) or \
positive_value_exists(position_object.candidate_campaign_we_vote_id):
check_for_missing_office_data = True # We check separately
if not positive_value_exists(position_object.ballot_item_display_name) \
or not positive_value_exists(position_object.ballot_item_image_url_https) \
or not positive_value_exists(position_object.ballot_item_twitter_handle) \
or not positive_value_exists(position_object.state_code) \
or not positive_value_exists(position_object.political_party) \
or not positive_value_exists(position_object.politician_id) \
or not positive_value_exists(position_object.politician_we_vote_id):
try:
# We need to look in the candidate table for the ballot item information
results = candidate_campaign_manager.retrieve_candidate_campaign(
position_object.candidate_campaign_id, position_object.candidate_campaign_we_vote_id)
if results['candidate_campaign_found']:
candidate = results['candidate_campaign']
# Cache for further down
contest_office_id = candidate.contest_office_id
contest_office_we_vote_id = candidate.contest_office_we_vote_id
if not positive_value_exists(position_object.contest_office_id):
position_object.contest_office_id = contest_office_id
position_change = True
if not positive_value_exists(position_object.contest_office_we_vote_id):
position_object.contest_office_we_vote_id = contest_office_we_vote_id
position_change = True
if not positive_value_exists(position_object.ballot_item_display_name):
# ballot_item_display_name is missing so look it up from source
position_object.ballot_item_display_name = candidate.display_candidate_name()
position_change = True
if not positive_value_exists(position_object.ballot_item_image_url_https):
# ballot_item_image_url_https is missing so look it up from source
position_object.ballot_item_image_url_https = candidate.candidate_photo_url()
position_change = True
if not positive_value_exists(position_object.ballot_item_twitter_handle):
# ballot_item_twitter_handle is missing so look it up from source
position_object.ballot_item_twitter_handle = candidate.candidate_twitter_handle
position_change = True
if not positive_value_exists(position_object.state_code):
# state_code is missing so look it up from source
position_object.state_code = candidate.get_candidate_state()
position_change = True
if not positive_value_exists(position_object.political_party):
# political_party is missing so look it up from source
position_object.political_party = candidate.political_party_display()
position_change = True
if not positive_value_exists(position_object.politician_id):
# politician_id is missing so look it up from source
position_object.politician_id = candidate.politician_id
position_change = True
if not positive_value_exists(position_object.politician_we_vote_id):
# politician_we_vote_id is missing so look it up from source
position_object.politician_we_vote_id = candidate.politician_we_vote_id
position_change = True
except Exception as e:
# best-effort refresh: if the candidate lookup fails, leave cached values as-is
pass
# Measure
elif positive_value_exists(position_object.contest_measure_id) or \
positive_value_exists(position_object.contest_measure_we_vote_id):
if not positive_value_exists(position_object.ballot_item_display_name) \
or position_object.ballot_item_display_name == "None" \
or positive_value_exists(position_object.ballot_item_image_url_https) \
or positive_value_exists(position_object.ballot_item_twitter_handle) \
or not positive_value_exists(position_object.state_code):
try:
# We need to look in the contest measure table for the ballot item information
contest_measure_manager = ContestMeasureManager()
results = contest_measure_manager.retrieve_contest_measure(
position_object.contest_measure_id, position_object.contest_measure_we_vote_id)
if results['contest_measure_found']:
contest_measure = results['contest_measure']
if not positive_value_exists(position_object.ballot_item_display_name) \
or position_object.ballot_item_display_name == "None":
# ballot_item_display_name is missing so look it up from source
position_object.ballot_item_display_name = contest_measure.measure_title
position_change = True
if positive_value_exists(position_object.ballot_item_image_url_https):
# ballot_item_image_url_https should not exist for measures
position_object.ballot_item_image_url_https = ""
position_change = True
if positive_value_exists(position_object.ballot_item_twitter_handle):
# ballot_item_twitter_handle should not exist for measures
position_object.ballot_item_twitter_handle = ""
position_change = True
if not positive_value_exists(position_object.state_code):
# state_code is missing so look it up from source
position_object.state_code = contest_measure.state_code
position_change = True
except Exception as e:
# best-effort refresh: if the measure lookup fails, leave cached values as-is
pass
# Office - We are only here if NOT a candidate and NOT a measure
elif positive_value_exists(position_object.contest_office_id) or \
positive_value_exists(position_object.contest_office_we_vote_id):
check_for_missing_office_data = True
if check_for_missing_office_data:
if not positive_value_exists(position_object.contest_office_id) \
or not positive_value_exists(position_object.contest_office_we_vote_id) \
or not positive_value_exists(position_object.contest_office_name):
if not positive_value_exists(position_object.contest_office_id) \
and not positive_value_exists(position_object.contest_office_we_vote_id):
if not contest_office_id or not contest_office_we_vote_id:
# If here we need to get the contest_office identifier from the candidate
candidate_results = candidate_campaign_manager.retrieve_candidate_campaign_from_we_vote_id(
position_object.candidate_campaign_we_vote_id)
if candidate_results['candidate_campaign_found']:
candidate = candidate_results['candidate_campaign']
position_object.contest_office_id = candidate.contest_office_id
position_object.contest_office_we_vote_id = candidate.contest_office_we_vote_id
position_change = True
else:
position_object.contest_office_id = contest_office_id
position_object.contest_office_we_vote_id = contest_office_we_vote_id
position_change = True
office_found = False
contest_office_manager = ContestOfficeManager()
if positive_value_exists(position_object.contest_office_id):
results = contest_office_manager.retrieve_contest_office_from_id(position_object.contest_office_id)
office_found = results['contest_office_found']
elif positive_value_exists(position_object.contest_office_we_vote_id):
results = contest_office_manager.retrieve_contest_office_from_we_vote_id(
position_object.contest_office_we_vote_id)
office_found = results['contest_office_found']
if office_found:
office_object = results['contest_office']
if not positive_value_exists(position_object.contest_office_id):
position_object.contest_office_id = office_object.id
position_change = True
if not positive_value_exists(position_object.contest_office_we_vote_id):
position_object.contest_office_we_vote_id = office_object.we_vote_id
position_change = True
if not positive_value_exists(position_object.contest_office_name):
position_object.contest_office_name = office_object.office_name
position_change = True
if position_change:
position_object.save()
return position_object
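# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): a hedged illustration of
# how refresh_cached_position_info might be driven in a maintenance pass.
# "PositionManager" as the owning manager class and the queryset filter are
# assumptions for illustration; PositionEntered is the public-position model
# used above.
def refresh_positions_missing_speaker_names():
    position_manager = PositionManager()  # assumed manager class name
    for position in PositionEntered.objects.filter(speaker_display_name=""):
        # Copies missing speaker/ballot-item fields from the source tables;
        # saves only when something actually changed (see position_change).
        position_manager.refresh_cached_position_info(position)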
[flattened per-file metric columns for the file above omitted: line-length stats, n-gram duplication fractions, and related quality signals]

[next dataset row]
hexsha: ae90d162b15880f4ca61120e7daeace1d782e6ee | size: 24,363 | ext: py | lang: Python
path: rpython/translator/mu/test_impl/test_binop.py
repo: wdv4758h/mu-client-pypy @ d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf
licenses: ["Apache-2.0", "OpenSSL"] | stars/issues/forks: null
from impl_test_util import impl_jit_test
def test_add(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i8 = int<8>
.const @0xff_i8 <@i8> = 0xff
.const @0x0a_i8 <@i8> = 0x0a
.funcsig @sig__i8 = () -> (@i8)
.funcdecl @test_fnc <@fnrsig__i8>
.funcdef @test_fnc VERSION @test_fnc_v1 <@sig__i8> {
@test_fnc_v1.blk0():
@test_fnc_v1.blk0.res = ADD <@i8> @0xff_i8 @0x0a_i8
RET @test_fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i8 = bldr.gen_sym("@i8")
bldr.new_type_int(i8, 8)
c_0xff_i8 = bldr.gen_sym("@0xff_i8")
bldr.new_const_int(c_0xff_i8, i8, 0xff)
c_0x0a_i8 = bldr.gen_sym("@0x0a_i8")
bldr.new_const_int(c_0x0a_i8, i8, 0x0a)
sig__i8 = bldr.gen_sym("@sig__i8")
bldr.new_funcsig(sig__i8, [], [i8])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i8)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.ADD, i8, c_0xff_i8, c_0x0a_i8)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i8": i8,
"test_fnc_sig": sig__i8,
"test_fnc": test_fnc,
"result_type": i8
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 9
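# Editor's note: the expected values in this file follow two's-complement
# wraparound at the declared bit width. For test_add above:
#     (0xff + 0x0a) & 0xff == 0x109 & 0xff == 0x09 == 9
# and likewise below, e.g. test_sub: (0x0a - 0xff) & 0xff == 0x0b == 11,
# test_mul: (0xff * 0x0a) & 0xff == 0xf6.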
def test_sub(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i8 = int<8>
.const @0xff_i8 <@i8> = 0xff
.const @0x0a_i8 <@i8> = 0x0a
.funcsig @sig__i8 = () -> (@i8)
.funcdecl @test_fnc <@fnrsig__i8>
.funcdef @test_fnc VERSION @test_fnc_v1 <@sig__i8> {
@test_fnc_v1.blk0():
@test_fnc_v1.blk0.res = SUB <@i8> @0x0a_i8 @0xff_i8
RET @test_fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i8 = bldr.gen_sym("@i8")
bldr.new_type_int(i8, 8)
c_0xff_i8 = bldr.gen_sym("@0xff_i8")
bldr.new_const_int(c_0xff_i8, i8, 0xff)
c_0x0a_i8 = bldr.gen_sym("@0x0a_i8")
bldr.new_const_int(c_0x0a_i8, i8, 0x0a)
sig__i8 = bldr.gen_sym("@sig__i8")
bldr.new_funcsig(sig__i8, [], [i8])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i8)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.SUB, i8, c_0x0a_i8, c_0xff_i8)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i8": i8,
"test_fnc_sig": sig__i8,
"test_fnc": test_fnc,
"result_type": i8
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 11
def test_mul(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i8 = int<8>
.const @0xff_i8 <@i8> = 0xff
.const @0x0a_i8 <@i8> = 0x0a
.funcsig @sig__i8 = () -> (@i8)
.funcdecl @test_fnc <@fnrsig__i8>
.funcdef @test_fnc VERSION @test_fnc_v1 <@sig__i8> {
@test_fnc_v1.blk0():
@test_fnc_v1.blk0.res = MUL <@i8> @0xff_i8 @0x0a_i8
RET @test_fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i8 = bldr.gen_sym("@i8")
bldr.new_type_int(i8, 8)
c_0xff_i8 = bldr.gen_sym("@0xff_i8")
bldr.new_const_int(c_0xff_i8, i8, 0xff)
c_0x0a_i8 = bldr.gen_sym("@0x0a_i8")
bldr.new_const_int(c_0x0a_i8, i8, 0x0a)
sig__i8 = bldr.gen_sym("@sig__i8")
bldr.new_funcsig(sig__i8, [], [i8])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i8)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.MUL, i8, c_0xff_i8, c_0x0a_i8)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i8": i8,
"test_fnc_sig": sig__i8,
"test_fnc": test_fnc,
"result_type": i8
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0xf6
def test_udiv(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i8 = int<8>
.const @0x80_i8 <@i8> = 0x80
.const @0x0a_i8 <@i8> = 0x0a
.funcsig @sig__i8 = () -> (@i8)
.funcdecl @test_fnc <@fnrsig__i8>
.funcdef @test_fnc VERSION @test_fnc_v1 <@sig__i8> {
@test_fnc_v1.blk0():
@test_fnc_v1.blk0.res = UDIV <@i8> @0x80_i8 @0x0a_i8
RET @test_fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i8 = bldr.gen_sym("@i8")
bldr.new_type_int(i8, 8)
c_0x80_i8 = bldr.gen_sym("@0x80_i8")
bldr.new_const_int(c_0x80_i8, i8, 0x80)
c_0x0a_i8 = bldr.gen_sym("@0x0a_i8")
bldr.new_const_int(c_0x0a_i8, i8, 0x0a)
sig__i8 = bldr.gen_sym("@sig__i8")
bldr.new_funcsig(sig__i8, [], [i8])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i8)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.UDIV, i8, c_0x80_i8, c_0x0a_i8)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i8": i8,
"test_fnc_sig": sig__i8,
"test_fnc": test_fnc,
"result_type": i8
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 12
def test_sdiv(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i8 = int<8>
.const @0x80_i8 <@i8> = 0x80
.const @0x0a_i8 <@i8> = 0x0a
.funcsig @sig__i8 = () -> (@i8)
.funcdecl @test_fnc <@fnrsig__i8>
.funcdef @test_fnc VERSION @test_fnc_v1 <@sig__i8> {
@test_fnc_v1.blk0():
@test_fnc_v1.blk0.res = SDIV <@i8> @0x80_i8 @0x0a_i8
RET @test_fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i8 = bldr.gen_sym("@i8")
bldr.new_type_int(i8, 8)
c_0x80_i8 = bldr.gen_sym("@0x80_i8")
bldr.new_const_int(c_0x80_i8, i8, 0x80)
c_0x0a_i8 = bldr.gen_sym("@0x0a_i8")
bldr.new_const_int(c_0x0a_i8, i8, 0x0a)
sig__i8 = bldr.gen_sym("@sig__i8")
bldr.new_funcsig(sig__i8, [], [i8])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i8)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.SDIV, i8, c_0x80_i8, c_0x0a_i8)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i8": i8,
"test_fnc_sig": sig__i8,
"test_fnc": test_fnc,
"result_type": i8
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0xf4
def test_srem(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i8 = int<8>
.const @0xff_i8 <@i8> = 0xff
.const @0x0a_i8 <@i8> = 0x0a
.funcsig @sig__i8 = () -> (@i8)
.funcdecl @test_fnc <@fnrsig__i8>
.funcdef @test_fnc VERSION @test_fnc_v1 <@sig__i8> {
@test_fnc_v1.blk0():
@test_fnc_v1.blk0.res = SREM <@i8> @0xff_i8 @0x0a_i8
RET @test_fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i8 = bldr.gen_sym("@i8")
bldr.new_type_int(i8, 8)
c_0xff_i8 = bldr.gen_sym("@0xff_i8")
bldr.new_const_int(c_0xff_i8, i8, 0xff)
c_0x0a_i8 = bldr.gen_sym("@0x0a_i8")
bldr.new_const_int(c_0x0a_i8, i8, 0x0a)
sig__i8 = bldr.gen_sym("@sig__i8")
bldr.new_funcsig(sig__i8, [], [i8])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i8)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.SREM, i8, c_0xff_i8, c_0x0a_i8)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i8": i8,
"test_fnc_sig": sig__i8,
"test_fnc": test_fnc,
"result_type": i8
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0xff
def test_urem(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i8 = int<8>
.const @0xff_i8 <@i8> = 0xff
.const @0x0a_i8 <@i8> = 0x0a
.funcsig @sig__i8 = () -> (@i8)
.funcdecl @test_fnc <@fnrsig__i8>
.funcdef @test_fnc VERSION @test_fnc_v1 <@sig__i8> {
@test_fnc_v1.blk0():
@test_fnc_v1.blk0.res = UREM <@i8> @0xff_i8 @0x0a_i8
RET @test_fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i8 = bldr.gen_sym("@i8")
bldr.new_type_int(i8, 8)
c_0xff_i8 = bldr.gen_sym("@0xff_i8")
bldr.new_const_int(c_0xff_i8, i8, 0xff)
c_0x0a_i8 = bldr.gen_sym("@0x0a_i8")
bldr.new_const_int(c_0x0a_i8, i8, 0x0a)
sig__i8 = bldr.gen_sym("@sig__i8")
bldr.new_funcsig(sig__i8, [], [i8])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i8)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.UREM, i8, c_0xff_i8, c_0x0a_i8)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i8": i8,
"test_fnc_sig": sig__i8,
"test_fnc": test_fnc,
"result_type": i8
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 5
def test_shl(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i64 = int<64>
.const @0x6d9f9c1d58324b55_i64 <@i64> = 0x6d9f9c1d58324b55
.const @0x0a_i64 <@i64> = 0x0a
.funcsig @sig__i64 = () -> (@i64)
.funcdecl @fnc <@fnrsig__i64>
.funcdef @fnc VERSION @fnc_v1 <@sig__i64> {
@fnc_v1.blk0():
@fnc_v1.blk0.res = SHL <@i64> @0x6d9f9c1d58324b55_i64 @0x0a_i64
RET @fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i64 = bldr.gen_sym("@i64")
bldr.new_type_int(i64, 64)
c_0x6d9f9c1d58324b55_i64 = bldr.gen_sym("@0x6d9f9c1d58324b55_i64")
bldr.new_const_int(c_0x6d9f9c1d58324b55_i64, i64, 0x6d9f9c1d58324b55)
c_0x0a_i64 = bldr.gen_sym("@0x0a_i64")
bldr.new_const_int(c_0x0a_i64, i64, 0x0a)
sig__i64 = bldr.gen_sym("@sig__i64")
bldr.new_funcsig(sig__i64, [], [i64])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i64)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.SHL, i64, c_0x6d9f9c1d58324b55_i64, c_0x0a_i64)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i64": i64,
"test_fnc_sig": sig__i64,
"test_fnc": test_fnc,
"result_type": i64
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0x7e707560c92d5400
def test_lshr(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i64 = int<64>
.const @0x8d9f9c1d58324b55_i64 <@i64> = 0x8d9f9c1d58324b55
.const @0x0a_i64 <@i64> = 0x0a
.funcsig @sig__i64 = () -> (@i64)
.funcdecl @fnc <@fnrsig__i64>
.funcdef @fnc VERSION @fnc_v1 <@sig__i64> {
@fnc_v1.blk0():
@fnc_v1.blk0.res = LSHR <@i64> @0x8d9f9c1d58324b55_i64 @0x0a_i64
RET @fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i64 = bldr.gen_sym("@i64")
bldr.new_type_int(i64, 64)
c_0x8d9f9c1d58324b55_i64 = bldr.gen_sym("@0x8d9f9c1d58324b55_i64")
bldr.new_const_int(c_0x8d9f9c1d58324b55_i64, i64, 0x8d9f9c1d58324b55)
c_0x0a_i64 = bldr.gen_sym("@0x0a_i64")
bldr.new_const_int(c_0x0a_i64, i64, 0x0a)
sig__i64 = bldr.gen_sym("@sig__i64")
bldr.new_funcsig(sig__i64, [], [i64])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i64)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.LSHR, i64, c_0x8d9f9c1d58324b55_i64, c_0x0a_i64)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i64": i64,
"test_fnc_sig": sig__i64,
"test_fnc": test_fnc,
"result_type": i64
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0x2367e707560c92
def test_ashr(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i64 = int<64>
.const @0x8d9f9c1d58324b55_i64 <@i64> = 0x8d9f9c1d58324b55
.const @0x0a_i64 <@i64> = 0x0a
.funcsig @sig__i64 = () -> (@i64)
.funcdecl @fnc <@fnrsig__i64>
.funcdef @fnc VERSION @fnc_v1 <@sig__i64> {
@fnc_v1.blk0():
@fnc_v1.blk0.res = ASHR <@i64> @0x8d9f9c1d58324b55_i64 @0x0a_i64
RET @fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i64 = bldr.gen_sym("@i64")
bldr.new_type_int(i64, 64)
c_0x8d9f9c1d58324b55_i64 = bldr.gen_sym("@0x8d9f9c1d58324b55_i64")
bldr.new_const_int(c_0x8d9f9c1d58324b55_i64, i64, 0x8d9f9c1d58324b55)
c_0x0a_i64 = bldr.gen_sym("@0x0a_i64")
bldr.new_const_int(c_0x0a_i64, i64, 0x0a)
sig__i64 = bldr.gen_sym("@sig__i64")
bldr.new_funcsig(sig__i64, [], [i64])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i64)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.ASHR, i64, c_0x8d9f9c1d58324b55_i64, c_0x0a_i64)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i64": i64,
"test_fnc_sig": sig__i64,
"test_fnc": test_fnc,
"result_type": i64
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0xffe367e707560c92
def test_and(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i64 = int<64>
.const @0x8d9f9c1d58324b55_i64 <@i64> = 0x8d9f9c1d58324b55
.const @0xd5a8f2deb00debb4_i64 <@i64> = 0xd5a8f2deb00debb4
.funcsig @sig__i64 = () -> (@i64)
.funcdecl @fnc <@fnrsig__i64>
.funcdef @fnc VERSION @fnc_v1 <@sig__i64> {
@fnc_v1.blk0():
@fnc_v1.blk0.res = AND <@i64> @0x8d9f9c1d58324b55_i64 @0xd5a8f2deb00debb4_i64
RET @fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i64 = bldr.gen_sym("@i64")
bldr.new_type_int(i64, 64)
c_0x8d9f9c1d58324b55_i64 = bldr.gen_sym("@0x8d9f9c1d58324b55_i64")
bldr.new_const_int(c_0x8d9f9c1d58324b55_i64, i64, 0x8d9f9c1d58324b55)
c_0xd5a8f2deb00debb4_i64 = bldr.gen_sym("@0xd5a8f2deb00debb4_i64")
bldr.new_const_int(c_0xd5a8f2deb00debb4_i64, i64, 0xd5a8f2deb00debb4)
sig__i64 = bldr.gen_sym("@sig__i64")
bldr.new_funcsig(sig__i64, [], [i64])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i64)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.AND, i64, c_0x8d9f9c1d58324b55_i64, c_0xd5a8f2deb00debb4_i64)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i64": i64,
"test_fnc_sig": sig__i64,
"test_fnc": test_fnc,
"result_type": i64
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0x8588901c10004b14
def test_or(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i64 = int<64>
.const @0x8d9f9c1d58324b55_i64 <@i64> = 0x8d9f9c1d58324b55
.const @0xd5a8f2deb00debb4_i64 <@i64> = 0xd5a8f2deb00debb4
.funcsig @sig__i64 = () -> (@i64)
.funcdecl @fnc <@fnrsig__i64>
.funcdef @fnc VERSION @fnc_v1 <@sig__i64> {
@fnc_v1.blk0():
@fnc_v1.blk0.res = OR <@i64> @0x8d9f9c1d58324b55_i64 @0xd5a8f2deb00debb4_i64
RET @fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i64 = bldr.gen_sym("@i64")
bldr.new_type_int(i64, 64)
c_0x8d9f9c1d58324b55_i64 = bldr.gen_sym("@0x8d9f9c1d58324b55_i64")
bldr.new_const_int(c_0x8d9f9c1d58324b55_i64, i64, 0x8d9f9c1d58324b55)
c_0xd5a8f2deb00debb4_i64 = bldr.gen_sym("@0xd5a8f2deb00debb4_i64")
bldr.new_const_int(c_0xd5a8f2deb00debb4_i64, i64, 0xd5a8f2deb00debb4)
sig__i64 = bldr.gen_sym("@sig__i64")
bldr.new_funcsig(sig__i64, [], [i64])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i64)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.OR, i64, c_0x8d9f9c1d58324b55_i64, c_0xd5a8f2deb00debb4_i64)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i64": i64,
"test_fnc_sig": sig__i64,
"test_fnc": test_fnc,
"result_type": i64
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0xddbffedff83febf5
def test_xor(cmdopt):
def build_test_bundle(bldr, rmu):
"""
Builds the following test bundle.
.typedef @i64 = int<64>
.const @0x8d9f9c1d58324b55_i64 <@i64> = 0x8d9f9c1d58324b55
.const @0xd5a8f2deb00debb4_i64 <@i64> = 0xd5a8f2deb00debb4
.funcsig @sig__i64 = () -> (@i64)
.funcdecl @fnc <@fnrsig__i64>
.funcdef @fnc VERSION @fnc_v1 <@sig__i64> {
@fnc_v1.blk0():
@fnc_v1.blk0.res = XOR <@i64> @0x8d9f9c1d58324b55_i64 @0xd5a8f2deb00debb4_i64
RET @fnc_v1.blk0.res
}
:return: (rmu.MuVM(), rmu.MuCtx, rmu.MuIRBuilder, MuID, MuID)
"""
i64 = bldr.gen_sym("@i64")
bldr.new_type_int(i64, 64)
c_0x8d9f9c1d58324b55_i64 = bldr.gen_sym("@0x8d9f9c1d58324b55_i64")
bldr.new_const_int(c_0x8d9f9c1d58324b55_i64, i64, 0x8d9f9c1d58324b55)
c_0xd5a8f2deb00debb4_i64 = bldr.gen_sym("@0xd5a8f2deb00debb4_i64")
bldr.new_const_int(c_0xd5a8f2deb00debb4_i64, i64, 0xd5a8f2deb00debb4)
sig__i64 = bldr.gen_sym("@sig__i64")
bldr.new_funcsig(sig__i64, [], [i64])
test_fnc = bldr.gen_sym("@test_fnc")
bldr.new_func(test_fnc, sig__i64)
# function body
v1 = bldr.gen_sym("@test_fnc_v1")
blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
res = bldr.gen_sym("@test_fnc_v1.blk0.res")
op_binop = bldr.gen_sym()
bldr.new_binop(op_binop, res, rmu.MuBinOptr.XOR, i64, c_0x8d9f9c1d58324b55_i64, c_0xd5a8f2deb00debb4_i64)
op_ret = bldr.gen_sym()
bldr.new_ret(op_ret, [res])
bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
bldr.new_func_ver(v1, test_fnc, [blk0])
return {
"@i64": i64,
"test_fnc_sig": sig__i64,
"test_fnc": test_fnc,
"result_type": i64
}
res = impl_jit_test(cmdopt, build_test_bundle)
if cmdopt.run:
assert res == 0x58376ec3e83fa0e1
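# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): every test above repeats
# the same builder sequence, varying only the opcode, bit width, and operand
# constants. A factory like this could generate the bundle builders; it uses
# only the bldr/rmu calls already exercised above. "make_binop_bundle" is a
# hypothetical name.
def make_binop_bundle(optr_name, width, lhs, rhs):
    def build_test_bundle(bldr, rmu):
        ity = bldr.gen_sym("@i%d" % width)
        bldr.new_type_int(ity, width)
        c_lhs = bldr.gen_sym()
        bldr.new_const_int(c_lhs, ity, lhs)
        c_rhs = bldr.gen_sym()
        bldr.new_const_int(c_rhs, ity, rhs)
        sig = bldr.gen_sym()
        bldr.new_funcsig(sig, [], [ity])
        test_fnc = bldr.gen_sym("@test_fnc")
        bldr.new_func(test_fnc, sig)
        v1 = bldr.gen_sym("@test_fnc_v1")
        blk0 = bldr.gen_sym("@test_fnc_v1.blk0")
        res = bldr.gen_sym("@test_fnc_v1.blk0.res")
        op_binop = bldr.gen_sym()
        bldr.new_binop(op_binop, res, getattr(rmu.MuBinOptr, optr_name), ity, c_lhs, c_rhs)
        op_ret = bldr.gen_sym()
        bldr.new_ret(op_ret, [res])
        bldr.new_bb(blk0, [], [], rmu.MU_NO_ID, [op_binop, op_ret])
        bldr.new_func_ver(v1, test_fnc, [blk0])
        return {"test_fnc_sig": sig, "test_fnc": test_fnc, "result_type": ity}
    return build_test_bundle
# Usage would mirror the tests above, e.g.:
#     res = impl_jit_test(cmdopt, make_binop_bundle("ADD", 8, 0xff, 0x0a))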
[flattened per-file metric columns for the file above omitted]

[next dataset row]
hexsha: 884091ab38b6c754a4c81268711f3e209e437aca | size: 5,333 | ext: py | lang: Python
path: accounts/migrations/0005_adaboost_dtree_knn_lregression_nbayes_rforest_svm_xgboost.py
repo: nahian01/HDPS @ 791d0adfb8e68b96e8cf49a6e6cbeaa290d3baa9
licenses: ["CC0-1.0"]
stars: 1 (event 2020-10-07T17:39:27.000Z) | issues: null | forks: 1 (event 2020-10-22T16:02:18.000Z)
# Generated by Django 3.0.1 on 2020-02-17 11:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20200217_0959'),
]
operations = [
migrations.CreateModel(
name='XGBoost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.FloatField()),
('accuracy', models.FloatField()),
('con_00', models.FloatField()),
('con_01', models.FloatField()),
('con_10', models.FloatField()),
('con_11', models.FloatField()),
('disease', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Disease')),
],
),
migrations.CreateModel(
name='SVM',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.FloatField()),
('accuracy', models.FloatField()),
('con_00', models.FloatField()),
('con_01', models.FloatField()),
('con_10', models.FloatField()),
('con_11', models.FloatField()),
('disease', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Disease')),
],
),
migrations.CreateModel(
name='RForest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.FloatField()),
('accuracy', models.FloatField()),
('con_00', models.FloatField()),
('con_01', models.FloatField()),
('con_10', models.FloatField()),
('con_11', models.FloatField()),
('disease', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Disease')),
],
),
migrations.CreateModel(
name='Nbayes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.FloatField()),
('accuracy', models.FloatField()),
('con_00', models.FloatField()),
('con_01', models.FloatField()),
('con_10', models.FloatField()),
('con_11', models.FloatField()),
('disease', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Disease')),
],
),
migrations.CreateModel(
name='LRegression',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.FloatField()),
('accuracy', models.FloatField()),
('con_00', models.FloatField()),
('con_01', models.FloatField()),
('con_10', models.FloatField()),
('con_11', models.FloatField()),
('disease', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Disease')),
],
),
migrations.CreateModel(
name='KNN',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.FloatField()),
('accuracy', models.FloatField()),
('con_00', models.FloatField()),
('con_01', models.FloatField()),
('con_10', models.FloatField()),
('con_11', models.FloatField()),
('disease', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Disease')),
],
),
migrations.CreateModel(
name='DTree',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.FloatField()),
('accuracy', models.FloatField()),
('con_00', models.FloatField()),
('con_01', models.FloatField()),
('con_10', models.FloatField()),
('con_11', models.FloatField()),
('disease', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Disease')),
],
),
migrations.CreateModel(
name='Adaboost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('result', models.FloatField()),
('accuracy', models.FloatField()),
('con_00', models.FloatField()),
('con_01', models.FloatField()),
('con_10', models.FloatField()),
('con_11', models.FloatField()),
('disease', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Disease')),
],
),
]
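# ---------------------------------------------------------------------------
# Editor's sketch (hypothetical; would belong in accounts/models.py, not in
# this migration): the eight CreateModel operations above define identical
# fields, so the models could share one abstract base. "ClassifierResult" is
# an assumed name; con_00..con_11 presumably hold confusion-matrix cells.
from django.db import models

class ClassifierResult(models.Model):
    result = models.FloatField()
    accuracy = models.FloatField()
    con_00 = models.FloatField()  # presumably confusion-matrix count [0][0]
    con_01 = models.FloatField()
    con_10 = models.FloatField()
    con_11 = models.FloatField()
    disease = models.ForeignKey('accounts.Disease', on_delete=models.CASCADE)

    class Meta:
        abstract = True

# class XGBoost(ClassifierResult): pass   # likewise SVM, RForest, Nbayes,
#                                         # LRegression, KNN, DTree, Adaboost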
[flattened per-file metric columns for the file above omitted]

[next dataset row]
hexsha: ee05cbe043bd226ffad1a991302b36e9a507b50a | size: 46 | ext: py | lang: Python
path: py/PyPLext/__init__.py
repo: mlxd/PLext @ d03be93750c5c8d8fe00db5a53e0466d5132e0c7
licenses: ["Apache-2.0"]
stars: 2 (events 2021-10-01T20:59:48.000Z to 2021-10-04T16:09:49.000Z) | issues: null | forks: null
from .pyplext import *
from _PyPLext import *
[flattened per-file metric columns for the file above omitted]

[next dataset row]
hexsha: ee0856b9f3a457c1a46c389c858a43ed538385d1 | size: 5,443 | ext: py | lang: Python
path: tests/test_ddl_settings.py
repo: swiatek25/simple-ddl-parser @ b46f99b4e1838718bc4024cd281a66cd1b78b165
licenses: ["MIT"] | stars/issues/forks: null
from simple_ddl_parser import DDLParser
def test_sets_with_dot_and_comma():
ddl = """
--
-- PostgreSQL database dump
--
-- Dumped from database version 11.6 (Debian 11.6-1.pgdg90+1)
-- Dumped by pg_dump version 12.9 (Ubuntu 12.9-0ubuntu0.20.04.1)
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;
SET default_tablespace = '';
--
-- Name: accounts; Type: TABLE; Schema: public; Owner: myapp
--
"""
result = DDLParser(ddl).run(group_by_type=True, output_mode="bigquery")
expected = {
"ddl_properties": [
{"name": "statement_timeout", "value": "0"},
{"name": "lock_timeout", "value": "0"},
{"name": "idle_in_transaction_session_timeout", "value": "0"},
{"name": "client_encoding", "value": "'UTF8'"},
{"name": "standard_conforming_strings", "value": "on"},
{"name": "check_function_bodies", "value": "false"},
{"name": "xmloption", "value": "content"},
{"name": "client_min_messages", "value": "warning"},
{"name": "row_security", "value": "off"},
{"name": "default_tablespace", "value": "''"},
],
"domains": [],
"schemas": [],
"sequences": [],
"tables": [],
"types": [],
}
assert expected == result
def test_parse_validly_tables_after_set():
ddl = """
--
-- PostgreSQL database dump
--
-- Dumped from database version 11.6 (Debian 11.6-1.pgdg90+1)
-- Dumped by pg_dump version 12.9 (Ubuntu 12.9-0ubuntu0.20.04.1)
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;
SET default_tablespace = '';
--
-- Name: accounts; Type: TABLE; Schema: public; Owner: myapp
--
CREATE TABLE public.accounts (
user_id integer NOT NULL,
username character varying(50) NOT NULL,
password character varying(50) NOT NULL,
email character varying(255) NOT NULL,
);
"""
result = DDLParser(ddl).run(group_by_type=True, output_mode="bigquery")
expected = {
"ddl_properties": [
{"name": "statement_timeout", "value": "0"},
{"name": "lock_timeout", "value": "0"},
{"name": "idle_in_transaction_session_timeout", "value": "0"},
{"name": "client_encoding", "value": "'UTF8'"},
{"name": "standard_conforming_strings", "value": "on"},
{"name": "check_function_bodies", "value": "false"},
{"name": "xmloption", "value": "content"},
{"name": "client_min_messages", "value": "warning"},
{"name": "row_security", "value": "off"},
{"name": "default_tablespace", "value": "''"},
],
"domains": [],
"schemas": [],
"sequences": [],
"tables": [
{
"alter": {},
"checks": [],
"columns": [
{
"check": None,
"default": None,
"name": "user_id",
"nullable": False,
"references": None,
"size": None,
"type": "integer",
"unique": False,
},
{
"check": None,
"default": None,
"name": "username",
"nullable": False,
"references": None,
"size": 50,
"type": "character varying",
"unique": False,
},
{
"check": None,
"default": None,
"name": "password",
"nullable": False,
"references": None,
"size": 50,
"type": "character varying",
"unique": False,
},
{
"check": None,
"default": None,
"name": "email",
"nullable": False,
"references": None,
"size": 255,
"type": "character varying",
"unique": False,
},
],
"dataset": "public",
"index": [],
"partitioned_by": [],
"primary_key": [],
"table_name": "accounts",
"tablespace": None,
}
],
"types": [],
}
assert expected == result
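# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): both tests above follow the
# same shape -- feed raw DDL text to DDLParser, run with group_by_type=True,
# and compare against a grouped dict. A hedged distillation of that pattern,
# post-processing the "ddl_properties" group the tests establish:
def parse_ddl_properties(ddl):
    """Return the session SET properties parsed out of a DDL dump as a dict."""
    result = DDLParser(ddl).run(group_by_type=True, output_mode="bigquery")
    return {prop["name"]: prop["value"] for prop in result["ddl_properties"]}
# e.g. parse_ddl_properties(ddl)["client_encoding"] == "'UTF8'" for the dumps above.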
[flattened per-file metric columns for the file above omitted]

[next dataset row]
hexsha: ee175ec09e88d6c6a1549a91cc4a869a95ece66e | size: 17,724 | ext: py | lang: Python
path: src/genie/libs/parser/iosxe/tests/ShowSdwanTunnelStatistics/cli/equal/ipsec_stats_golden_output_expected.py
repo: balmasea/genieparser @ d1e71a96dfb081e0a8591707b9d4872decd5d9d3
licenses: ["Apache-2.0"] | stars/issues/forks: null
expected_output = {
"tunnel": {
"150.0.5.1": {
"remote": {
"150.0.0.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.1.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.2.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.3.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.4.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.6.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.7.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.8.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.10.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.40.4": {
"src_port": 12346,
"dst_port": 12366,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.0.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.1.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.2.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.3.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.4.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.6.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.7.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.40.4": {
"src_port": 12346,
"dst_port": 12366,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
}
}
},
"151.0.5.1": {
"remote": {
"150.0.0.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.1.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.2.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.3.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.4.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.6.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.7.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.8.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.10.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"150.0.40.4": {
"src_port": 12346,
"dst_port": 12366,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.0.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.1.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.2.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.3.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.4.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.6.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.7.1": {
"src_port": 12346,
"dst_port": 12346,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
},
"151.0.40.4": {
"src_port": 12346,
"dst_port": 12366,
"ipsec": {
"ipsec_decrypt_inbound": 0,
"ipsec_rx_auth_failures": 0,
"ipsec_rx_failures": 0,
"ipsec_encrypt_outbound": 0,
"ipsec_tx_auth_failures": 0,
"ipsec_tx_failures": 0
}
}
}
}
}
}
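# ---------------------------------------------------------------------------
# Editor's sketch (hypothetical helper, not part of the fixture): every
# per-peer block above is identical except for dst_port, so the fixture
# could be built programmatically instead of spelled out by hand:
def _peer(dst_port=12346):
    return {
        "src_port": 12346,
        "dst_port": dst_port,
        "ipsec": {
            "ipsec_decrypt_inbound": 0,
            "ipsec_rx_auth_failures": 0,
            "ipsec_rx_failures": 0,
            "ipsec_encrypt_outbound": 0,
            "ipsec_tx_auth_failures": 0,
            "ipsec_tx_failures": 0,
        },
    }
# e.g. expected_output["tunnel"]["150.0.5.1"]["remote"]["150.0.40.4"] == _peer(12366)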
[flattened per-file metric columns for the file above omitted]

[next dataset row]
hexsha: ee3f3c8b930f83f0248308903be1d8517eab2f64 | size: 181 | ext: py | lang: Python
path: question8.py
repo: gusenov/test-tech-mail-ru-python2 @ 70e37a3de447b6f7c4da5add75f65df1b51405fe
licenses: ["MIT"] | stars/issues/forks: null
print ''.join(['H', 'e', 'l', 'l', 'o']) # Hello
print "hello".title() # Hello
print "hello".upper() # HELLO
print 'Hello, world'[:5] # Hello
print 'Hello, world'[1:6] # ello,
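# Editor's note: the snippet above is Python 2 syntax (print as a statement).
# The Python 3 equivalents call print() as a function, e.g.:
#     print(''.join(['H', 'e', 'l', 'l', 'o']))  # Hello
#     print('Hello, world'[:5])                  # Hello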
[flattened per-file metric columns for the file above omitted]

[next dataset row]
hexsha: c9cf88654e3e76c7ea5badbd64ad889a393efaa0 | size: 40,959 | ext: py | lang: Python
path: pfruck_contabo/api/images_api.py
repo: p-fruck/python-contabo @ c3abd362a0b90783118f36bec0e557bdbe5a8f2c
licenses: ["Apache-2.0"]
stars: 2 (events 2022-01-27T10:36:33.000Z to 2022-03-09T14:21:12.000Z) | issues: 7 (events 2022-01-13T10:44:19.000Z to 2022-02-15T23:44:44.000Z) | forks: null
"""
Contabo API
The version of the OpenAPI document: 1.0.0
Contact: support@contabo.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pfruck_contabo.api_client import ApiClient, Endpoint as _Endpoint
from pfruck_contabo.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from pfruck_contabo.model.create_custom_image_fail_response import CreateCustomImageFailResponse
from pfruck_contabo.model.create_custom_image_request import CreateCustomImageRequest
from pfruck_contabo.model.create_custom_image_response import CreateCustomImageResponse
from pfruck_contabo.model.custom_images_stats_response import CustomImagesStatsResponse
from pfruck_contabo.model.find_image_response import FindImageResponse
from pfruck_contabo.model.list_image_response import ListImageResponse
from pfruck_contabo.model.update_custom_image_request import UpdateCustomImageRequest
from pfruck_contabo.model.update_custom_image_response import UpdateCustomImageResponse
class ImagesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_custom_image_endpoint = _Endpoint(
settings={
'response_type': (CreateCustomImageResponse,),
'auth': [
'bearer'
],
'endpoint_path': '/v1/compute/images',
'operation_id': 'create_custom_image',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'x_request_id',
'create_custom_image_request',
'x_trace_id',
],
'required': [
'x_request_id',
'create_custom_image_request',
],
'nullable': [
],
'enum': [
],
'validation': [
'x_request_id',
]
},
root_map={
'validations': {
('x_request_id',): {
'regex': {
'pattern': r'^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-5][0-9A-Fa-f]{3}-[089abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$', # noqa: E501
},
},
},
'allowed_values': {
},
'openapi_types': {
'x_request_id':
(str,),
'create_custom_image_request':
(CreateCustomImageRequest,),
'x_trace_id':
(str,),
},
'attribute_map': {
'x_request_id': 'x-request-id',
'x_trace_id': 'x-trace-id',
},
'location_map': {
'x_request_id': 'header',
'create_custom_image_request': 'body',
'x_trace_id': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_image_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'bearer'
],
'endpoint_path': '/v1/compute/images/{imageId}',
'operation_id': 'delete_image',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'x_request_id',
'image_id',
'x_trace_id',
],
'required': [
'x_request_id',
'image_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'x_request_id',
]
},
root_map={
'validations': {
('x_request_id',): {
'regex': {
'pattern': r'^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-5][0-9A-Fa-f]{3}-[089abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$', # noqa: E501
},
},
},
'allowed_values': {
},
'openapi_types': {
'x_request_id':
(str,),
'image_id':
(str,),
'x_trace_id':
(str,),
},
'attribute_map': {
'x_request_id': 'x-request-id',
'image_id': 'imageId',
'x_trace_id': 'x-trace-id',
},
'location_map': {
'x_request_id': 'header',
'image_id': 'path',
'x_trace_id': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client
)
self.retrieve_custom_images_stats_endpoint = _Endpoint(
settings={
'response_type': (CustomImagesStatsResponse,),
'auth': [
'bearer'
],
'endpoint_path': '/v1/compute/images/stats',
'operation_id': 'retrieve_custom_images_stats',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'x_request_id',
'x_trace_id',
],
'required': [
'x_request_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'x_request_id',
]
},
root_map={
'validations': {
('x_request_id',): {
'regex': {
'pattern': r'^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-5][0-9A-Fa-f]{3}-[089abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$', # noqa: E501
},
},
},
'allowed_values': {
},
'openapi_types': {
'x_request_id':
(str,),
'x_trace_id':
(str,),
},
'attribute_map': {
'x_request_id': 'x-request-id',
'x_trace_id': 'x-trace-id',
},
'location_map': {
'x_request_id': 'header',
'x_trace_id': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.retrieve_image_endpoint = _Endpoint(
settings={
'response_type': (FindImageResponse,),
'auth': [
'bearer'
],
'endpoint_path': '/v1/compute/images/{imageId}',
'operation_id': 'retrieve_image',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'x_request_id',
'image_id',
'x_trace_id',
],
'required': [
'x_request_id',
'image_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'x_request_id',
]
},
root_map={
'validations': {
('x_request_id',): {
'regex': {
'pattern': r'^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-5][0-9A-Fa-f]{3}-[089abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$', # noqa: E501
},
},
},
'allowed_values': {
},
'openapi_types': {
'x_request_id':
(str,),
'image_id':
(str,),
'x_trace_id':
(str,),
},
'attribute_map': {
'x_request_id': 'x-request-id',
'image_id': 'imageId',
'x_trace_id': 'x-trace-id',
},
'location_map': {
'x_request_id': 'header',
'image_id': 'path',
'x_trace_id': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.retrieve_image_list_endpoint = _Endpoint(
settings={
'response_type': (ListImageResponse,),
'auth': [
'bearer'
],
'endpoint_path': '/v1/compute/images',
'operation_id': 'retrieve_image_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'x_request_id',
'x_trace_id',
'page',
'size',
'order_by',
'name',
'standard_image',
],
'required': [
'x_request_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'x_request_id',
]
},
root_map={
'validations': {
('x_request_id',): {
'regex': {
'pattern': r'^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-5][0-9A-Fa-f]{3}-[089abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$', # noqa: E501
},
},
},
'allowed_values': {
},
'openapi_types': {
'x_request_id':
(str,),
'x_trace_id':
(str,),
'page':
(int,),
'size':
(int,),
'order_by':
([str],),
'name':
(str,),
'standard_image':
(bool,),
},
'attribute_map': {
'x_request_id': 'x-request-id',
'x_trace_id': 'x-trace-id',
'page': 'page',
'size': 'size',
'order_by': 'orderBy',
'name': 'name',
'standard_image': 'standardImage',
},
'location_map': {
'x_request_id': 'header',
'x_trace_id': 'header',
'page': 'query',
'size': 'query',
'order_by': 'query',
'name': 'query',
'standard_image': 'query',
},
'collection_format_map': {
'order_by': 'multi',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_image_endpoint = _Endpoint(
settings={
'response_type': (UpdateCustomImageResponse,),
'auth': [
'bearer'
],
'endpoint_path': '/v1/compute/images/{imageId}',
'operation_id': 'update_image',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'x_request_id',
'image_id',
'update_custom_image_request',
'x_trace_id',
],
'required': [
'x_request_id',
'image_id',
'update_custom_image_request',
],
'nullable': [
],
'enum': [
],
'validation': [
'x_request_id',
]
},
root_map={
'validations': {
('x_request_id',): {
'regex': {
'pattern': r'^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-5][0-9A-Fa-f]{3}-[089abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$', # noqa: E501
},
},
},
'allowed_values': {
},
'openapi_types': {
'x_request_id':
(str,),
'image_id':
(str,),
'update_custom_image_request':
(UpdateCustomImageRequest,),
'x_trace_id':
(str,),
},
'attribute_map': {
'x_request_id': 'x-request-id',
'image_id': 'imageId',
'x_trace_id': 'x-trace-id',
},
'location_map': {
'x_request_id': 'header',
'image_id': 'path',
'update_custom_image_request': 'body',
'x_trace_id': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def create_custom_image(
self,
x_request_id,
create_custom_image_request,
**kwargs
):
"""Provide a custom image # noqa: E501
In order to provide a custom image please specify an URL from where the image can be directly downloaded. A custom image must be in either `.iso` or `.qcow2` format. Other formats will be rejected. Please note that downloading can take a while depending on network speed resp. bandwidth and size of image. You can check the status by retrieving information about the image via a GET request. Download will be rejected if you have exceeded your limits. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_custom_image(x_request_id, create_custom_image_request, async_req=True)
>>> result = thread.get()
Args:
x_request_id (str): [Uuid4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) to identify individual requests for support cases. You can use [uuidgenerator](https://www.uuidgenerator.net/version4) to generate them manually.
create_custom_image_request (CreateCustomImageRequest):
Keyword Args:
            x_trace_id (str): Identifier to trace a group of requests. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
CreateCustomImageResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['x_request_id'] = \
x_request_id
kwargs['create_custom_image_request'] = \
create_custom_image_request
return self.create_custom_image_endpoint.call_with_http_info(**kwargs)
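    # Synchronous usage sketch (hypothetical wiring; assumes this API class
    # was instantiated as `api` with a configured api_client, mirroring the
    # async example in the docstring above):
    #   import uuid
    #   body = CreateCustomImageRequest(...)  # fill in the request fields
    #   resp = api.create_custom_image(str(uuid.uuid4()), body)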
def delete_image(
self,
x_request_id,
image_id,
**kwargs
):
"""Delete an uploaded custom image by its id # noqa: E501
Your are free to delete a previously uploaded custom images at any time. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_image(x_request_id, image_id, async_req=True)
>>> result = thread.get()
Args:
x_request_id (str): [Uuid4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) to identify individual requests for support cases. You can use [uuidgenerator](https://www.uuidgenerator.net/version4) to generate them manually.
image_id (str): The identifier of the image
Keyword Args:
            x_trace_id (str): Identifier to trace a group of requests. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['x_request_id'] = \
x_request_id
kwargs['image_id'] = \
image_id
return self.delete_image_endpoint.call_with_http_info(**kwargs)
def retrieve_custom_images_stats(
self,
x_request_id,
**kwargs
):
"""List statistics regarding the customer's custom images # noqa: E501
        List statistics regarding the customer's custom images, such as the number of custom images uploaded, the used disk space, the free available disk space, and the total available disk space. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_custom_images_stats(x_request_id, async_req=True)
>>> result = thread.get()
Args:
x_request_id (str): [Uuid4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) to identify individual requests for support cases. You can use [uuidgenerator](https://www.uuidgenerator.net/version4) to generate them manually.
Keyword Args:
            x_trace_id (str): Identifier to trace a group of requests. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
CustomImagesStatsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['x_request_id'] = \
x_request_id
return self.retrieve_custom_images_stats_endpoint.call_with_http_info(**kwargs)
def retrieve_image(
self,
x_request_id,
image_id,
**kwargs
):
"""Get details about a specific image by its id # noqa: E501
        Get details about a specific image. This could be either a standard or a custom image. In the case of a custom image you can also check the download status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_image(x_request_id, image_id, async_req=True)
>>> result = thread.get()
Args:
x_request_id (str): [Uuid4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) to identify individual requests for support cases. You can use [uuidgenerator](https://www.uuidgenerator.net/version4) to generate them manually.
image_id (str): The identifier of the image
Keyword Args:
            x_trace_id (str): Identifier to trace a group of requests. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
FindImageResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['x_request_id'] = \
x_request_id
kwargs['image_id'] = \
image_id
return self.retrieve_image_endpoint.call_with_http_info(**kwargs)
def retrieve_image_list(
self,
x_request_id,
**kwargs
):
"""List available standard and custom images # noqa: E501
List and filter all available standard images provided by [Contabo](https://contabo.com) and your uploaded custom images. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.retrieve_image_list(x_request_id, async_req=True)
>>> result = thread.get()
Args:
x_request_id (str): [Uuid4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) to identify individual requests for support cases. You can use [uuidgenerator](https://www.uuidgenerator.net/version4) to generate them manually.
Keyword Args:
            x_trace_id (str): Identifier to trace a group of requests. [optional]
            page (int): Number of the page to be fetched. [optional]
            size (int): Number of elements per page. [optional]
            order_by ([str]): Specify fields and ordering (ASC for ascending, DESC for descending) in the following format `field:ASC|DESC`. [optional]
            name (str): The name of the image. [optional]
            standard_image (bool): Flag indicating whether the image is a standard (true) or a custom (false) image. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
ListImageResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['x_request_id'] = \
x_request_id
return self.retrieve_image_list_endpoint.call_with_http_info(**kwargs)
def update_image(
self,
x_request_id,
image_id,
update_custom_image_request,
**kwargs
):
"""Update custom image name by its id # noqa: E501
        Update the name of the custom image. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_image(x_request_id, image_id, update_custom_image_request, async_req=True)
>>> result = thread.get()
Args:
x_request_id (str): [Uuid4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) to identify individual requests for support cases. You can use [uuidgenerator](https://www.uuidgenerator.net/version4) to generate them manually.
image_id (str): The identifier of the image
update_custom_image_request (UpdateCustomImageRequest):
Keyword Args:
            x_trace_id (str): Identifier to trace a group of requests. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
UpdateCustomImageResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['x_request_id'] = \
x_request_id
kwargs['image_id'] = \
image_id
kwargs['update_custom_image_request'] = \
update_custom_image_request
return self.update_image_endpoint.call_with_http_info(**kwargs)
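    # Usage sketch for listing and filtering images (hypothetical wiring;
    # parameter names taken from the retrieve_image_list params_map above):
    #   import uuid
    #   page_one = api.retrieve_image_list(
    #       str(uuid.uuid4()),       # x_request_id must be a UUID4
    #       page=1, size=25,
    #       order_by=['name:ASC'],
    #       standard_image=False,    # only custom images
    #   )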
| 40.593657
| 473
| 0.514759
| 4,151
| 40,959
| 4.836425
| 0.077813
| 0.031082
| 0.038852
| 0.008966
| 0.858936
| 0.836671
| 0.819586
| 0.801106
| 0.787906
| 0.771867
| 0
| 0.009021
| 0.39647
| 40,959
| 1,008
| 474
| 40.633929
| 0.803115
| 0.397007
| 0
| 0.665165
| 1
| 0.009009
| 0.257505
| 0.0711
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010511
| false
| 0
| 0.018018
| 0
| 0.039039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4e5579813e3e5e6843a2231fc4950533006afbf4
| 11,127
|
py
|
Python
|
betrack/utils/parser.py
|
gvalentini85/betrack-cli
|
981dc22baf0b60914816ac5965a1a9f171767e47
|
[
"MIT"
] | null | null | null |
betrack/utils/parser.py
|
gvalentini85/betrack-cli
|
981dc22baf0b60914816ac5965a1a9f171767e47
|
[
"MIT"
] | 10
|
2018-05-12T00:49:27.000Z
|
2018-06-05T17:31:14.000Z
|
betrack/utils/parser.py
|
gvalentini85/betrack-cli
|
981dc22baf0b60914816ac5965a1a9f171767e47
|
[
"MIT"
] | null | null | null |
#------------------------------------------------------------------------------#
# Copyright 2018 Gabriele Valentini. All rights reserved. Use of this source #
# code is governed by a MIT license that can be found in the LICENSE file. #
#------------------------------------------------------------------------------#
"""
Description of the `parser` module..
"""
from os.path import isfile, isdir, abspath
import errno
import yaml
from .message import wprint
def open_configuration(filename):
    """
    Opens the YAML configuration file ``filename`` and returns its content
    as a dictionary.
    :param str filename: the name of the configuration file
    :returns: the parsed configuration
    :rtype: dict
    :raises IOError: if the file ``filename`` is not found
    """
    if isfile(filename):
        # Open the YAML file and parse its content.
        filename = abspath(filename)
        with open(filename, 'r') as f:
            config = yaml.safe_load(f)
        return config
    else:
        raise IOError(errno.ENOENT, 'file not found', filename)
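# Usage sketch (illustrative; 'betrack.yml' and the 'outdir' key are
# hypothetical examples, parsed by yaml.safe_load into a plain dict):
#   config = open_configuration('betrack.yml')
#   outdir = parse_directory(config, 'outdir')  # see the parsers below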
def parse_file(src, key):
"""
Parses a dictionary ``src`` and returns the file name specified by ``key``.
This function checks that the file specified by ``key`` exists.
:param dict src: the source dictionary
    :param str key: the key specifying the file name to be parsed
:returns: a file name
:rtype: str
:raises IOError: if the file specified by ``key`` is not found
:raises KeyError: if the attribute ``key`` is not found in ``src``
"""
if key in src:
val = src.get(key)
if isfile(val):
return val
else:
raise IOError(errno.ENOENT, 'file not found', val)
else:
raise KeyError('attribute not found!', key)
def parse_directory(src, key):
"""
Parses a dictionary ``src`` and returns the directory name specified by ``key``.
This function checks that the directory specified by ``key`` exists.
:param dict src: the source dictionary
    :param str key: the key specifying the directory name to be parsed
:returns: a directory name
:rtype: str
:raises IOError: if the directory specified by ``key`` is not found
:raises KeyError: if the attribute ``key`` is not found in ``src``
"""
if key in src:
val = src.get(key)
if isdir(val):
return val
else:
raise IOError(errno.ENOENT, 'directory not found', val)
else:
raise KeyError('attribute not found!', key)
def parse_int(src, key, nentries=1):
"""
Parses a dictionary ``src`` and returns a number ``nentries`` of integers
specified by ``key``. This function checks that the value or values specified
by ``key`` are of type integer and raises a ``ValueError`` otherwise.
:param dict src: the source dictionary
    :param str key: the key specifying the integers to be parsed
    :param int nentries: the number of integers to be parsed
    :returns: parsed integer(s)
    :rtype: int or list of int
:raises ValueError: if the parsed values are not valid
:raises KeyError: if the attribute ``key`` is not found in ``src``
"""
if nentries < 1:
raise ValueError('expected number of entries must be greater than zero')
if key in src:
val = src.get(key)
if type(val) == int:
if nentries != 1:
msg = 'attribute ' + key + ' has 1 entry, expected ' + str(nentries)
raise ValueError(msg)
return val
elif type(val) == list:
nval = len(val)
if nval != nentries:
msg = 'attribute ' + key + ' has ' + str(nval)
msg += ' entries, expected ' + str(nentries)
raise ValueError(msg)
for m in range(0, nval):
if type(val[m]) != int:
raise ValueError('entry ' + str(m + 1) + ' is not int')
return val
else:
raise ValueError('attribute ' + key + ' is not of type int or list')
else:
raise KeyError('attribute not found!', key)
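# Examples of the contract shared by the parse_* helpers (illustrative):
#   parse_int({'size': 640}, 'size')                     # -> 640
#   parse_int({'size': [640, 480]}, 'size', nentries=2)  # -> [640, 480]
#   parse_int({'size': 640}, 'size', nentries=2)         # raises ValueError
#   parse_int({}, 'size')                                # raises KeyError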
def parse_float(src, key, nentries=1):
"""
Parse a dictionary ``src`` and return a float or a list of float specified by ``key``.
This function checks that the value or values specified by ``key`` is of
type float or list of float and raises a ``ValueError`` otherwise.
:param dict src: the source dictionary
    :param str key: the key specifying the floats to be parsed
    :param int nentries: the number of floats to parse
    :returns: parsed float(s)
    :rtype: float or list of float
:raises ValueError: if the parsed values are not valid
:raises KeyError: if the attribute ``key`` is not found in ``src``
"""
if nentries < 1:
raise ValueError('expected number of entries must be greater than zero')
if key in src:
val = src.get(key)
if type(val) == float:
if nentries != 1:
msg = 'attribute ' + key + ' has 1 entry, expected ' + str(nentries)
raise ValueError(msg)
return val
elif type(val) == list:
nval = len(val)
if nval != nentries:
msg = 'attribute ' + key + ' has ' + str(nval)
msg += ' entries, expected ' + str(nentries)
raise ValueError(msg)
for m in range(0, nval):
if type(val[m]) != float:
raise ValueError('entry ' + str(m + 1) + ' is not float')
return val
else:
raise ValueError('attribute ' + key + ' is not of type float or list')
else:
raise KeyError('attribute not found!', key)
def parse_int_or_float(src, key, nentries=1):
"""
Parse a dictionary ``src`` and return an int or float or a list of int or
float specified by ``key``. This function checks that the value or values
specified by ``key`` is of type int or float or list of int or float and
raises a ``ValueError`` otherwise.
:param dict src: the source dictionary
    :param str key: the key specifying the values to be parsed
    :param int nentries: the number of values to parse
    :returns: parsed value(s)
    :rtype: int or float, or list of int or float
:raises ValueError: if the parsed values are not valid
:raises KeyError: if the attribute ``key`` is not found in ``src``
"""
if nentries < 1:
raise ValueError('expected number of entries must be greater than zero')
if key in src:
val = src.get(key)
if type(val) == int or type(val) == float:
if nentries != 1:
msg = 'attribute ' + key + ' has 1 entry, expected ' + str(nentries)
raise ValueError(msg)
return val
elif type(val) == list:
nval = len(val)
if nval != nentries:
msg = 'attribute ' + key + ' has ' + str(nval)
msg += ' entries, expected ' + str(nentries)
raise ValueError(msg)
for m in range(0, nval):
if type(val[m]) != int and type(val[m]) != float:
raise ValueError('entry ' + str(m + 1) + ' is not int or float')
return val
else:
raise ValueError('attribute ' + key + ' is not of type int or float or list')
else:
raise KeyError('attribute not found!', key)
def parse_bool(src, key, nentries=1):
"""
Parse a dictionary ``src`` and return a bool or a list of bool specified by ``key``.
This function checks that the value or values specified by ``key`` is of
type bool or list of bool and raises a ``ValueError`` otherwise.
:param dict src: the source dictionary
    :param str key: the key specifying the booleans to be parsed
    :param int nentries: the number of booleans to parse
    :returns: parsed boolean(s)
    :rtype: bool or list of bool
:raises ValueError: if the parsed values are not valid
:raises KeyError: if the attribute ``key`` is not found in ``src``
"""
if nentries < 1:
raise ValueError('expected number of entries must be greater than zero')
if key in src:
val = src.get(key)
if type(val) == bool:
if nentries != 1:
msg = 'attribute ' + key + ' has 1 entry, expected ' + str(nentries)
raise ValueError(msg)
return val
elif type(val) == list:
nval = len(val)
if nval != nentries:
msg = 'attribute ' + key + ' has ' + str(nval)
msg += ' entries, expected ' + str(nentries)
raise ValueError(msg)
for m in range(0, nval):
if type(val[m]) != bool:
raise ValueError('entry ' + str(m + 1) + ' is not bool')
return val
else:
raise ValueError('attribute ' + key + ' is not of type bool or list')
else:
raise KeyError('attribute not found!', key)
def parse_str(src, key, nentries=1):
"""
Parse a dictionary ``src`` and return a str or a list of str
specified by ``key``. This function checks that the value or values specified by
``key`` is of type str or list of str and raises a ``ValueError`` otherwise.
:param dict src: the source dictionary
    :param str key: the key specifying the strings to be parsed
    :param int nentries: the number of strings to parse
    :returns: parsed string(s)
    :rtype: str or list of str
:raises ValueError: if the parsed values are not valid
:raises KeyError: if the attribute ``key`` is not found in ``src``
"""
if nentries < 1:
raise ValueError('expected number of entries must be greater than zero')
if key in src:
val = src.get(key)
        try: isunicode = (type(val) == unicode)  # Python 2: check for unicode strings
        except NameError: isunicode = False      # Python 3: the name 'unicode' is gone
if type(val) == str or isunicode:
val = val.encode()
if nentries != 1:
msg = 'attribute ' + key + ' has 1 entry, expected ' + str(nentries)
raise ValueError(msg)
return val
elif type(val) == list:
nval = len(val)
if nval != nentries:
msg = 'attribute ' + key + ' has ' + str(nval)
msg += ' entries, expected ' + str(nentries)
raise ValueError(msg)
for m in range(0, nval):
            try: isunicode = (type(val[m]) == unicode)  # Python 2 only
            except NameError: isunicode = False         # Python 3: the name 'unicode' is gone
if type(val[m]) != str and not isunicode:
raise ValueError('entry ' + str(m + 1) + ' is not str')
else: val[m] = val[m].encode()
return val
else:
raise ValueError('attribute ' + key + ' is not of type str or list')
else:
raise KeyError('attribute not found!', key)
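# Note on parse_str: the try/except NameError blocks probe for the Python 2
# 'unicode' type; under Python 3 the name does not exist, so isunicode stays
# False. Matching values are normalized with .encode(), e.g. (illustrative):
#   parse_str({'name': 'video-1'}, 'name')  # -> b'video-1' on Python 3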
| 38.106164
| 91
| 0.544621
| 1,387
| 11,127
| 4.361211
| 0.094448
| 0.061994
| 0.037031
| 0.033725
| 0.87469
| 0.85634
| 0.840304
| 0.821293
| 0.798479
| 0.758307
| 0
| 0.004692
| 0.348791
| 11,127
| 291
| 92
| 38.237113
| 0.830113
| 0.359306
| 0
| 0.705128
| 0
| 0
| 0.15965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.025641
| 0
| 0.160256
| 0.00641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4e74e379c6bd31770ed7c96a47c3f5d0b6cb6fae
| 16,470
|
py
|
Python
|
application.py
|
mudit9/covid-vaccine-india
|
0b203732518ea91615cc3ead5b15fe75e6db475f
|
[
"MIT"
] | 3
|
2021-05-04T23:09:36.000Z
|
2021-09-29T04:00:57.000Z
|
application.py
|
mudit9/covid-vaccine-india
|
0b203732518ea91615cc3ead5b15fe75e6db475f
|
[
"MIT"
] | null | null | null |
application.py
|
mudit9/covid-vaccine-india
|
0b203732518ea91615cc3ead5b15fe75e6db475f
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request
import requests
import pandas as pd
import traceback
from datetime import date, timedelta, datetime
import json
application = Flask(__name__)
@application.route('/')
@application.route('/index.html')
def index():
return render_template('index.html')
@application.route('/about.html')
def about():
return render_template('about.html')
@application.route('/nearbypincodes')
def getNearbyPincodes():
try:
#pincode = request.args.get('pincode')
date = request.args.get('date')
#vars = request.args.get('vars')
pincode = request.args.get('pincode')
        pincode = pincode.replace('[',"").replace(']',"").replace("'","")
pincodes = []
pincodes.append(int(pincode)-2)
pincodes.append(int(pincode)-1)
pincodes.append(int(pincode))
pincodes.append(int(pincode)+1)
pincodes.append(int(pincode)+2)
pincodes.append(int(pincode)+3)
multirows = []
invalids = []
for pincode_item in pincodes:
#print(str(pincode_item))
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
response = requests.get("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="+str(pincode_item).strip()+"&date="+date,headers=headers)
#print('respone',response.text)
#print(response.text)
s = json.loads(response.text)
#print(s.keys())
if 'error' in s.keys():
if s['error'] == 'Invalid Pincode':
multirows.append([])
invalids.append("Invalid Pincode")
#return render_template('dates.html', rows = [],invalid="Invalid Pincode",pincode = pincode,date = date)
#print(s['sessions'])
else:
df = pd.DataFrame(s['sessions'])
#print(df.columns)
try:
df = df.sort_values(by=['min_age_limit'])
except Exception as e:
print("no min age limit")
rows = []
for i,r in df.iterrows():
Slots = ','.join(r['slots'])
r['CenterName'] = r['name']
r['Slots'] = Slots
#print(r)
rows.append(r)
invalids.append(None)
multirows.append(rows)
vars = zip(multirows,invalids,pincodes)
#print("invalids",invalids)
return render_template('multiplepincodes.html',vars = vars,pincodes=pincodes,date = date)
except Exception as e:
print(e)
return render_template('multiplepincodes.html', rows = [],invalid="Something went wrong.")
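# Shape of the CoWIN response handled above (illustrative; reduced to the
# keys this app actually reads):
#   {"sessions": [{"name": "...", "min_age_limit": 45,
#                  "slots": ["09:00AM-11:00AM", "..."], "...": "..."}]}
# or, for a bad pincode:
#   {"error": "Invalid Pincode"}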
@application.route('/multipledatesnext45')
def getNextMultipleDates45():
try:
#pincode = request.args.get('pincode')
date = request.args.get('date')
#vars = request.args.get('vars')
pincode = request.args.get('pincode')
        pincode = pincode.replace('[',"").replace(']',"").replace("'","")
date_time_obj = datetime.strptime(date, '%d/%m/%y')
tomorrow = date_time_obj + timedelta(days = 1)
date = datetime.strptime(str(tomorrow), '%Y-%m-%d %H:%M:%S').strftime('%d/%m/%y')
multirows = []
invalids = []
dates = [date]
for i in range(0,45):
date_time_obj = datetime.strptime(date, '%d/%m/%y')
tomorrow = date_time_obj + timedelta(days = 1)
date = datetime.strptime(str(tomorrow), '%Y-%m-%d %H:%M:%S').strftime('%d/%m/%y')
dates.append(date)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
response = requests.get("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="+str(pincode).strip()+"&date="+date,headers=headers)
#print('respone',response.text)
#print(response.text)
s = json.loads(response.text)
#print(s.keys())
if 'error' in s.keys():
if s['error'] == 'Invalid Pincode':
multirows.append([])
invalids.append("Invalid Pincode")
#return render_template('dates.html', rows = [],invalid="Invalid Pincode",pincode = pincode,date = date)
#print(s['sessions'])
else:
df = pd.DataFrame(s['sessions'])
#print(df.columns)
try:
df = df.sort_values(by=['min_age_limit'])
except Exception as e:
print("no min age limit")
rows = []
for i,r in df.iterrows():
Slots = ','.join(r['slots'])
r['CenterName'] = r['name']
r['Slots'] = Slots
#print(r)
rows.append(r)
invalids.append(None)
multirows.append(rows)
vars = zip(multirows,invalids,dates)
#print("invalids",invalids)
return render_template('multiplenextdates.html',vars = vars,pincode=pincode,dates = dates, lastdate = dates[-1])
except Exception as e:
        print(traceback.format_exc())
return render_template('multiplenextdates.html', rows = [],invalid="Something went wrong.")
@application.route('/multipledatesnext')
def getSlotsNextMultipleDates():
try:
#pincode = request.args.get('pincode')
date = request.args.get('date')
#vars = request.args.get('vars')
pincode = request.args.get('pincode')
        pincode = pincode.replace('[',"").replace(']',"").replace("'","")
date_time_obj = datetime.strptime(date, '%d/%m/%y')
tomorrow = date_time_obj + timedelta(days = 1)
date = datetime.strptime(str(tomorrow), '%Y-%m-%d %H:%M:%S').strftime('%d/%m/%y')
multirows = []
invalids = []
dates = [date]
for i in range(0,5):
date_time_obj = datetime.strptime(date, '%d/%m/%y')
tomorrow = date_time_obj + timedelta(days = 1)
date = datetime.strptime(str(tomorrow), '%Y-%m-%d %H:%M:%S').strftime('%d/%m/%y')
dates.append(date)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
response = requests.get("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="+str(pincode).strip()+"&date="+date,headers=headers)
#print('respone',response.text)
#print(response.text)
s = json.loads(response.text)
#print(s.keys())
if 'error' in s.keys():
if s['error'] == 'Invalid Pincode':
multirows.append([])
invalids.append("Invalid Pincode")
#return render_template('dates.html', rows = [],invalid="Invalid Pincode",pincode = pincode,date = date)
#print(s['sessions'])
else:
df = pd.DataFrame(s['sessions'])
#print(df.columns)
try:
df = df.sort_values(by=['min_age_limit'])
except Exception as e:
print("no min age limit")
rows = []
for i,r in df.iterrows():
Slots = ','.join(r['slots'])
r['CenterName'] = r['name']
r['Slots'] = Slots
#print(r)
rows.append(r)
invalids.append(None)
multirows.append(rows)
vars = zip(multirows,invalids,dates)
#print("invalids",invalids)
return render_template('multiplenextdates.html',vars = vars,pincode=pincode,dates = dates, lastdate = dates[-1])
except Exception as e:
print(e)
return render_template('multiplenextdates.html', rows = [],invalid="Something went wrong.")
@application.route('/multiplenextdates')
def getSlotsMultipleNextDay():
try:
#pincode = request.args.get('pincode')
date = request.args.get('date')
#vars = request.args.get('vars')
pincodes = request.args.getlist('pincodes')
pincodes = pincodes[0].replace("'", "").replace('[',"").replace(']',"")
pincodes = pincodes.split(',')
#print(pincodes,len(pincodes))
date_time_obj = datetime.strptime(date, '%d/%m/%y')
tomorrow = date_time_obj + timedelta(days = 1)
date = datetime.strptime(str(tomorrow), '%Y-%m-%d %H:%M:%S').strftime('%d/%m/%y')
multirows = []
invalids = []
for pincode_item in pincodes:
# print(str(pincode_item))
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
response = requests.get("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="+str(pincode_item).strip()+"&date="+date,headers=headers)
#print('respone',response.text)
#print(response.text)
s = json.loads(response.text)
#print(s.keys())
if 'error' in s.keys():
if s['error'] == 'Invalid Pincode':
multirows.append([])
invalids.append("Invalid Pincode")
#return render_template('dates.html', rows = [],invalid="Invalid Pincode",pincode = pincode,date = date)
#print(s['sessions'])
else:
df = pd.DataFrame(s['sessions'])
#print(df.columns)
try:
df = df.sort_values(by=['min_age_limit'])
except Exception as e:
print("no min age limit")
rows = []
for i,r in df.iterrows():
Slots = ','.join(r['slots'])
r['CenterName'] = r['name']
r['Slots'] = Slots
#print(r)
rows.append(r)
invalids.append(None)
multirows.append(rows)
vars = zip(multirows,invalids,pincodes)
#print("invalids",invalids)
return render_template('multipledates.html',vars = vars,pincodes=pincodes,date = date)
except Exception as e:
print(e)
return render_template('multipledates.html', rows = [],invalid="Something went wrong.")
@application.route('/nextdates')
def getSlotsNextDay():
try:
pincode = request.args.get('pincode')
date = request.args.get('date')
date_time_obj = datetime.strptime(date, '%d/%m/%y')
tomorrow = date_time_obj + timedelta(days = 1)
date = datetime.strptime(str(tomorrow), '%Y-%m-%d %H:%M:%S').strftime('%d/%m/%y')
#date = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%d/%m/%y')
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
response = requests.get("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="+str(pincode).strip()+"&date="+str(date),headers=headers)
#print('respone',response.text)
#print(response.text)
s = json.loads(response.text)
#print(s.keys())
if 'error' in s.keys():
if s['error'] == 'Invalid Pincode':
return render_template('dates.html', rows = [],invalid="Invalid Pincode",pincode = "pincode",date = date)
#print(s['sessions'])
else:
df = pd.DataFrame(s['sessions'])
try:
df = df.sort_values(by=['min_age_limit'])
except Exception as e:
print("no min age limit")
rows = []
for i,r in df.iterrows():
Slots = ','.join(r['slots'])
r['CenterName'] = r['name']
r['Slots'] = Slots
#print(r)
rows.append(r)
except Exception as e:
print(e)
return render_template('dates.html', rows = [],invalid="Something went wrong.")
#print('something went wrong.')
#print(rows)
return render_template('dates.html', rows = rows,pincode = pincode,date = date)
@application.route('/dates.html' ,methods=['POST'])
def getDates():
try:
pincode = request.form['pincode']
date = request.form['date']
date = datetime.strptime(date, '%Y-%m-%d').strftime('%d/%m/%y')
#print(date)
pincodes = pincode.split(',')
if len(pincodes)>1:
multirows = []
invalids = []
for pincode_item in pincodes:
#print(str(pincode_item))
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
response = requests.get("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="+str(pincode_item).strip()+"&date="+date,headers=headers)
#print('respone',response.text)
s = json.loads(response.text)
#print(s.keys())
if 'error' in s.keys():
if s['error'] == 'Invalid Pincode':
multirows.append([])
invalids.append("Invalid Pincode")
#return render_template('dates.html', rows = [],invalid="Invalid Pincode",pincode = pincode,date = date)
#print(s['sessions'])
else:
df = pd.DataFrame(s['sessions'])
#print(df.columns)
try:
df = df.sort_values(by=['min_age_limit'])
except Exception as e:
print("no min age limit")
rows = []
for i,r in df.iterrows():
Slots = ','.join(r['slots'])
r['CenterName'] = r['name']
r['Slots'] = Slots
#print(r)
rows.append(r)
invalids.append(None)
multirows.append(rows)
vars = zip(multirows, invalids,pincodes)
#print("invalids",invalids)
return render_template('multipledates.html',vars = vars,pincodes = pincodes,date = date)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
response = requests.get("https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/findByPin?pincode="+str(pincode).strip()+"&date="+date,headers=headers)
#print('respone',response.text)
s = json.loads(response.text)
#print(s.keys())
if 'error' in s.keys():
if s['error'] == 'Invalid Pincode':
return render_template('dates.html', rows = [],invalid="Invalid Pincode",pincode = pincode,date = date)
#print(s['sessions'])
else:
df = pd.DataFrame(s['sessions'])
try:
df = df.sort_values(by=['min_age_limit'])
except Exception as e:
print("no min age limit")
rows = []
for i,r in df.iterrows():
Slots = ','.join(r['slots'])
r['CenterName'] = r['name']
r['Slots'] = Slots
#print(r)
rows.append(r)
except Exception as e:
        print(traceback.format_exc())
return render_template('dates.html', rows = [],invalid="Something went wrong.")
#print(rows)
return render_template('dates.html', rows = rows,pincode = pincode,date = date)
if __name__ == '__main__':
application.run(debug=True)
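# Local smoke test (sketch; the pincode and date values are hypothetical,
# and the date format is %d/%m/%y as parsed by the routes above):
#   python application.py
#   curl 'http://127.0.0.1:5000/nearbypincodes?pincode=110001&date=01/06/21'
#   curl 'http://127.0.0.1:5000/nextdates?pincode=110001&date=01/06/21'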
| 42.55814
| 182
| 0.525076
| 1,787
| 16,470
| 4.788472
| 0.08282
| 0.039266
| 0.05142
| 0.027346
| 0.909197
| 0.898679
| 0.898679
| 0.898679
| 0.887227
| 0.869931
| 0
| 0.018477
| 0.323072
| 16,470
| 386
| 183
| 42.668394
| 0.749036
| 0.112325
| 0
| 0.810409
| 0
| 0.052045
| 0.202954
| 0.008932
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02974
| false
| 0
| 0.022305
| 0.007435
| 0.115242
| 0.048327
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4e863e3a08aa81b55c16bb8de060b5eb07e9ef0e
| 126
|
py
|
Python
|
sipmath/__init__.py
|
colsmit/sipmath
|
5036bdcf3fa956ee37fbab0b862e2e470ae12a9e
|
[
"MIT"
] | 5
|
2019-04-24T07:29:07.000Z
|
2021-07-13T23:42:03.000Z
|
sipmath/__init__.py
|
colsmit/sipmath
|
5036bdcf3fa956ee37fbab0b862e2e470ae12a9e
|
[
"MIT"
] | null | null | null |
sipmath/__init__.py
|
colsmit/sipmath
|
5036bdcf3fa956ee37fbab0b862e2e470ae12a9e
|
[
"MIT"
] | 2
|
2020-01-19T10:23:46.000Z
|
2020-04-18T09:14:14.000Z
|
from .sipmodel import sipmodel
from .sipinput import sipinput
| 25.2
| 31
| 0.825397
| 16
| 126
| 6.5
| 0.25
| 0.230769
| 0.346154
| 0.5
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0.126984
| 126
| 5
| 32
| 25.2
| 0.945455
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 12
|
14c84e01e8eae7b2daecdaa15f675eb45ac38eb9
| 97
|
py
|
Python
|
Python/CodingBat/extra_end.py
|
dvt32/cpp-journey
|
afd7db7a1ad106c41601fb09e963902187ae36e6
|
[
"MIT"
] | 1
|
2018-05-24T11:30:05.000Z
|
2018-05-24T11:30:05.000Z
|
Python/CodingBat/extra_end.py
|
dvt32/cpp-journey
|
afd7db7a1ad106c41601fb09e963902187ae36e6
|
[
"MIT"
] | null | null | null |
Python/CodingBat/extra_end.py
|
dvt32/cpp-journey
|
afd7db7a1ad106c41601fb09e963902187ae36e6
|
[
"MIT"
] | 2
|
2017-08-11T06:53:30.000Z
|
2017-08-29T12:07:52.000Z
|
# http://codingbat.com/prob/p148853
def extra_end(str):
return str[-2:] + str[-2:] + str[-2:]
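# Example: extra_end('Hello') -> 'lololo' (three copies of the last two chars).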
| 19.4
| 39
| 0.618557
| 16
| 97
| 3.6875
| 0.6875
| 0.20339
| 0.237288
| 0.271186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0.134021
| 97
| 4
| 40
| 24.25
| 0.595238
| 0.340206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
14cb86321a0474e9aa21dd4003bd4091513f8f3f
| 153
|
py
|
Python
|
test_login.py
|
Zcsff/gitHub_project
|
c8cc0f5fbb0c4634ffe3bdcd7296d28404389edc
|
[
"Apache-2.0"
] | null | null | null |
test_login.py
|
Zcsff/gitHub_project
|
c8cc0f5fbb0c4634ffe3bdcd7296d28404389edc
|
[
"Apache-2.0"
] | null | null | null |
test_login.py
|
Zcsff/gitHub_project
|
c8cc0f5fbb0c4634ffe3bdcd7296d28404389edc
|
[
"Apache-2.0"
] | null | null | null |
class TestLogin:
def test_login1(self):
assert 2
def test_login2(self):
assert 2
def test_login3(self):
assert 2
| 12.75
| 26
| 0.581699
| 20
| 153
| 4.3
| 0.5
| 0.244186
| 0.383721
| 0.325581
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.352941
| 153
| 11
| 27
| 13.909091
| 0.808081
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.428571
| false
| 0
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
094cc65219dd85a352d9a329924f94f6fd2291d0
| 2,224
|
py
|
Python
|
src/gold_queries.py
|
UCL-RITS/ClusterStats-Gold
|
e6450114ad8abd2a5c35d28d0d49d17020301d4f
|
[
"MIT"
] | null | null | null |
src/gold_queries.py
|
UCL-RITS/ClusterStats-Gold
|
e6450114ad8abd2a5c35d28d0d49d17020301d4f
|
[
"MIT"
] | null | null | null |
src/gold_queries.py
|
UCL-RITS/ClusterStats-Gold
|
e6450114ad8abd2a5c35d28d0d49d17020301d4f
|
[
"MIT"
] | null | null | null |
# SQlite queries for the Gold database.
# The values are inserted at the ? by cursor.execute(query, (val1, val2, val3))
# Get allocation usage data for all institutes for all time.
# Order by allocation start, end and then project name
# (as first allocation all starts on same date).
def gold_by_all_allocation_periods():
query = ("""SELECT j.g_project, sum(j.g_charge), r.g_id as date_alloc, r.g_account,
a.g_start_time, a.g_end_time, count(*) as num_jobs
FROM g_job AS j
INNER JOIN g_reservation_allocation AS r
ON j.g_request_id = r.g_request_id
INNER JOIN g_allocation AS a
ON date_alloc = a.g_id
GROUP BY date_alloc
ORDER BY a.g_start_time, a.g_end_time, j.g_project""")
return query
# Get data for all institutes with an allocation that begins in a given time period.
def gold_by_allocation_start_period():
query = ("""SELECT j.g_project, sum(j.g_charge), r.g_id as date_alloc, r.g_account,
a.g_start_time, a.g_end_time, count(*) as num_jobs
FROM g_job AS j
INNER JOIN g_reservation_allocation AS r
ON j.g_request_id = r.g_request_id
INNER JOIN g_allocation AS a
ON date_alloc = a.g_id
WHERE a.g_start_time >= ?
AND a.g_start_time <= ?
GROUP BY date_alloc
ORDER BY a.g_start_time, a.g_end_time, j.g_project""")
return query
# Get data for all institutes with an allocation that begins on this exact date.
def gold_by_allocation_start_date():
query = ("""SELECT j.g_project, sum(j.g_charge), r.g_id as date_alloc, r.g_account,
a.g_start_time, a.g_end_time, count(*) as num_jobs
FROM g_job AS j
INNER JOIN g_reservation_allocation AS r
ON j.g_request_id = r.g_request_id
INNER JOIN g_allocation AS a
ON date_alloc = a.g_id
WHERE a.g_start_time = ?
GROUP BY date_alloc
ORDER BY a.g_end_time, j.g_project""")
return query
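# Usage sketch (per the header note above; the database path and timestamp
# values are hypothetical):
#   import sqlite3
#   conn = sqlite3.connect('gold.db')
#   cur = conn.cursor()
#   cur.execute(gold_by_allocation_start_period(), (start_time, end_time))
#   rows = cur.fetchall()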
| 46.333333
| 87
| 0.602068
| 350
| 2,224
| 3.557143
| 0.2
| 0.027309
| 0.04498
| 0.070683
| 0.767068
| 0.728514
| 0.728514
| 0.728514
| 0.728514
| 0.706827
| 0
| 0.002015
| 0.330486
| 2,224
| 47
| 88
| 47.319149
| 0.834117
| 0.196493
| 0
| 0.805556
| 0
| 0.083333
| 0.866142
| 0.040495
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
117772d0f8c2d02776b1cf5ed644a390ffaeba12
| 13,483
|
py
|
Python
|
test_union_pay.py
|
moehuster/test_union_pay
|
a7b523fd5d1dbd17d19c9cc3df27d7976289d29c
|
[
"MIT"
] | 1
|
2019-11-06T09:01:06.000Z
|
2019-11-06T09:01:06.000Z
|
test_union_pay.py
|
moehuster/test_union_pay
|
a7b523fd5d1dbd17d19c9cc3df27d7976289d29c
|
[
"MIT"
] | 1
|
2019-11-06T09:02:13.000Z
|
2019-11-16T05:18:36.000Z
|
test_union_pay.py
|
moehuster/test_union_pay
|
a7b523fd5d1dbd17d19c9cc3df27d7976289d29c
|
[
"MIT"
] | 3
|
2019-04-01T03:54:20.000Z
|
2020-05-31T04:00:44.000Z
|
# -*- coding: utf-8 -*-
'''
UnionPay payment interface test program.
'''
import socket
import struct
from datetime import datetime
from binascii import hexlify
from binascii import unhexlify
from functools import reduce
import pydes
import py8583
import py8583spec
HOST = '202.101.25.188'
PORT = 20140
def parse_package(data):
""" 银联8583报文解析 """
if len(data) <= 2:
return
data_len = struct.unpack_from("!H", data[:2])[0]
if data_len != len(data) - 2:
print("Invalid length {0} - {1}".format(data_len, len(data) - 2))
else:
iso_packet = py8583.Iso8583(IsoMsg=data[2:], IsoSpec=py8583spec.IsoSpec1987BCD())
iso_packet.PrintMessage()
def calc_pinblock(**kw):
""" 计算PinBlock """
tmk = kw.get("TMK", "159D86C7C1F779EA29F77A6858E0DA2A")
pik = kw.get("PIK", "75CAD854C2E59A5EEDD7CA7410C2C215")
pan = kw.get("PAN", "6212142000000000012")
passwd = kw.get("passwd", "123456")
des3 = pydes.triple_des(unhexlify(tmk))
pinkey = des3.decrypt(unhexlify(pik))
des3 = pydes.triple_des(pinkey)
pinblock = unhexlify(('06'+passwd).ljust(16, 'F'))
customer_data = unhexlify('0000'+pan[-13:-1])
plain_pin = list(map(lambda x, y: x ^ y, pinblock, customer_data))
return hexlify(des3.encrypt(plain_pin)).decode('latin1').upper()
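# Worked example with the default arguments above: the PIN field
# '06123456FFFFFFFF' ('06' = PIN length, right-padded with 'F') is XORed
# with the account field '0000200000000001' ('0000' plus the 12 PAN digits
# preceding the check digit), giving the plain PIN block '06121456FFFFFFFE',
# which is then 3DES-encrypted under the PIN key recovered by decrypting
# PIK with TMK.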
def calc_mac_ecb(**kw):
""" 计算报文MAC值(ECB算法) """
tmk = kw.get("TMK", "159D86C7C1F779EA29F77A6858E0DA2A")
mak = kw.get("MAK", "E6218EF29513B143")
mab = kw.get("MAB", None)
mab += ''.rjust(8-len(mab) % 8, '\x00').encode('latin1')
mab = reduce(lambda x, y: bytes(list(map(lambda a, b: a ^ b, x, y))),
[mab[i:i+8] for i in range(0, len(mab), 8)])
des3 = pydes.triple_des(unhexlify(tmk))
mackey = des3.decrypt(unhexlify(mak))
des = pydes.des(mackey)
ret = des.encrypt(hexlify(mab[:4]).upper())
ret = bytes(list(map(lambda x, y: x ^ y, ret, hexlify(mab[4:]).upper())))
ret = des.encrypt(ret)
return hexlify(hexlify(ret[:4]).upper()).decode('latin1')
def calc_mac_cbc(**kw):
""" 计算报文MAC值(CBC算法) """
tmk = kw.get("TMK", "159D86C7C1F779EA29F77A6858E0DA2A")
mak = kw.get("MAK", "E6218EF29513B143")
vec = kw.get("IV", "\x00\x00\x00\x00\x00\x00\x00\x00")
mab = kw.get("MAB", None)
vec = vec.encode('latin1')
mab += ''.rjust(8-len(mab) % 8, '\x00').encode('latin1')
des3 = pydes.triple_des(unhexlify(tmk))
mackey = des3.decrypt(unhexlify(mak))
des = pydes.des(mackey)
for i in range(0, len(mab), 8):
vec = bytes(list(map(lambda a, b: a ^ b, vec, mab[i:i+8])))
vec = des.encrypt(vec)
return hexlify(vec).upper().decode('latin1')
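# Note: calc_mac_cbc is a single-DES CBC-MAC over 8-byte blocks (zero IV by
# default, NUL padding), keyed with the MAC key recovered by 3DES-decrypting
# MAK under TMK. The call sites below pass MAB=req_packet.BuildIso()[11:-8],
# which appears to cover the message body between the TPDU/header prefix and
# the trailing 8-byte MAC field.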
def terminal_checkin(**kw):
""" 设备终端签到 """
now = datetime.now()
req_packet = py8583.Iso8583(IsoSpec=py8583spec.IsoSpec1987BCD())
req_packet.MTI('0800')
req_packet.TPDU('6005810000')
req_packet.HEADER('603100000000')
    req_packet.FieldData(11, int(now.strftime('%H%M%S'))) # terminal trace number
    req_packet.FieldData(41, kw.get('TerminalNo', '52010009')) # terminal ID
    req_packet.FieldData(42, kw.get('MerchantNo', '898520154110004')) # merchant ID
    req_packet.FieldData(60, now.strftime('00%y%m%d003'))
    print("Terminal sign-on:")
req_packet.PrintMessage()
data = req_packet.BuildIso()
data = struct.pack('!H', len(data)) + data
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
py8583.MemDump("Sending: ", data)
sock.send(data)
data = sock.recv(4096)
py8583.MemDump('Received: ', data)
sock.close()
parse_package(data)
def balance_query(**kw):
""" 账户余额查询 """
now = datetime.now()
req_packet = py8583.Iso8583(IsoSpec=py8583spec.IsoSpec1987BCD())
req_packet.MTI('0200')
req_packet.TPDU('6005810000')
req_packet.HEADER('603100000000')
req_packet.FieldData(2, kw.get('PAN', '6212142000000000012')) # 主账号
req_packet.FieldData(3, '300000') # 交易处理码
req_packet.FieldData(11, now.strftime('%H%M%S')) # 终端交易流水
req_packet.FieldData(14, '2912') # 卡有效期
req_packet.FieldData(22, '051') # 服务点输入方式
req_packet.FieldData(23, kw.get('CardOrder', '000')) # 卡序列号
req_packet.FieldData(25, '00') # 服务点条件码
req_packet.FieldData(26, '06') # 服务点PIN获取码
req_packet.FieldData(35, kw.get('Track2', '6212142000000000012=29122206899031006')) # 二磁道数据
req_packet.FieldData(36, kw.get('Track3', None)) # 三磁道数据
req_packet.FieldData(41, kw.get('TerminalNo', '52010009')) # 终端代码
req_packet.FieldData(42, kw.get('MerchantNo', '898520154110004')) # 商户代码
req_packet.FieldData(49, '156') # 交易货币代码
req_packet.FieldData(52, kw.get('PinBlock', None)) # 个人标识码数据
req_packet.FieldData(53, '2600000000000000') # 安全控制信息
req_packet.FieldData(55, kw.get('ICData', None)) # IC卡数据域
req_packet.FieldData(60, now.strftime('01%y%m%d00000060'))
req_packet.FieldData(64, '0000000000000000')
req_packet.FieldData(64, calc_mac_cbc(MAB=req_packet.BuildIso()[11:-8])) # Message authentication code
print("账户余额查询:")
req_packet.PrintMessage()
data = req_packet.BuildIso()
data = struct.pack('!H', len(data)) + data
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
py8583.MemDump("Sending: ", data)
sock.send(data)
data = sock.recv(4096)
py8583.MemDump('Received: ', data)
sock.close()
parse_package(data)
def balance_payment(**kw):
""" 缴费 """
now = datetime.now()
req_packet = py8583.Iso8583(IsoSpec=py8583spec.IsoSpec1987BCD())
req_packet.MTI('0200')
req_packet.TPDU('6005810000')
req_packet.HEADER('603100000000')
req_packet.FieldData(2, kw.get('PAN', '6212142000000000012')) # Primary account number
req_packet.FieldData(3, '190000') # Processing code
req_packet.FieldData(4, kw.get('amount', '3').rjust(12, '0')) # Transaction amount
req_packet.FieldData(11, now.strftime('%H%M%S')) # Terminal trace number
req_packet.FieldData(14, '2912') # Card expiry date
req_packet.FieldData(22, '051') # POS entry mode
req_packet.FieldData(23, kw.get('CardOrder', '000')) # Card sequence number
req_packet.FieldData(25, '81') # POS condition code
req_packet.FieldData(26, '06') # POS PIN capture code
req_packet.FieldData(35, kw.get('Track2', '6212142000000000012=29122206899031006')) # Track 2 data
req_packet.FieldData(36, kw.get('Track3', None)) # Track 3 data
req_packet.FieldData(41, kw.get('TerminalNo', '52010009')) # Terminal ID
req_packet.FieldData(42, kw.get('MerchantNo', '898520154110004')) # Merchant ID
req_packet.FieldData(48, 'KP77SG0C26323520140909356184 70000000201809#') # Industry-specific information
req_packet.FieldData(49, '156') # Transaction currency code
req_packet.FieldData(52, kw.get('PinBlock', None)) # PIN data
req_packet.FieldData(53, '2600000000000000') # Security control information
req_packet.FieldData(55, kw.get('ICData', None)) # IC card data field
req_packet.FieldData(60, now.strftime('22%y%m%d00000060'))
req_packet.FieldData(64, '0000000000000000')
req_packet.FieldData(64, calc_mac_cbc(MAB=req_packet.BuildIso()[11:-8])) # Message authentication code
print("Bill payment: ")
req_packet.PrintMessage()
data = req_packet.BuildIso()
data = struct.pack('!H', len(data)) + data
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
py8583.MemDump("Sending: ", data)
sock.send(data)
data = sock.recv(4096)
py8583.MemDump('Received: ', data)
sock.close()
parse_package(data)
def payment_revoke(**kw):
""" 缴费撤销 """
now = datetime.now()
req_packet = py8583.Iso8583(IsoSpec=py8583spec.IsoSpec1987BCD())
req_packet.MTI('0200')
req_packet.TPDU('6005810000')
req_packet.HEADER('603100000000')
req_packet.FieldData(2, kw.get('PAN', '6212142000000000012')) # Primary account number
req_packet.FieldData(3, '280000') # Processing code
req_packet.FieldData(4, kw.get('amount', '1').rjust(12, '0')) # Transaction amount
req_packet.FieldData(11, now.strftime('%H%M%S')) # Terminal trace number
req_packet.FieldData(14, '2912') # Card expiry date
req_packet.FieldData(22, '051') # POS entry mode
req_packet.FieldData(23, kw.get('CardOrder', '000')) # Card sequence number
req_packet.FieldData(25, '81') # POS condition code
req_packet.FieldData(26, '06') # POS PIN capture code
req_packet.FieldData(35, kw.get('Track2', '6212142000000000012=29122206899031006')) # Track 2 data
req_packet.FieldData(36, kw.get('Track3', None)) # Track 3 data
req_packet.FieldData(37, kw.get('ReferNo', None)) # Original retrieval reference number
req_packet.FieldData(38, kw.get('AuthNo', None)) # Original authorization code
req_packet.FieldData(41, kw.get('TerminalNo', '52010009')) # Terminal ID
req_packet.FieldData(42, kw.get('MerchantNo', '898520154110004')) # Merchant ID
req_packet.FieldData(48, 'KP77SG0C26323520140909356184 70000000201809#') # Industry-specific information
req_packet.FieldData(49, '156') # Transaction currency code
req_packet.FieldData(52, kw.get('PinBlock', None)) # PIN data
req_packet.FieldData(53, '2600000000000000') # Security control information
req_packet.FieldData(60, now.strftime('22%y%m%d00000060'))
req_packet.FieldData(61, kw.get('Field61', None)) # Original transaction information
req_packet.FieldData(64, '0000000000000000')
req_packet.FieldData(64, calc_mac_cbc(MAB=req_packet.BuildIso()[11:-8])) # Message authentication code
print("缴费撤销: ")
req_packet.PrintMessage()
data = req_packet.BuildIso()
data = struct.pack('!H', len(data)) + data
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
py8583.MemDump("Sending: ", data)
sock.send(data)
data = sock.recv(4096)
py8583.MemDump('Received: ', data)
sock.close()
parse_package(data)
def payment_reversal(**kw):
""" 冲正交易 """
req_packet = py8583.Iso8583(IsoSpec=py8583spec.IsoSpec1987BCD())
req_packet.MTI('0400')
req_packet.TPDU('6005810000')
req_packet.HEADER('603100000000')
req_packet.FieldData(3, '190000') # Processing code
req_packet.FieldData(4, kw.get('amount', '1').rjust(12, '0')) # Transaction amount
req_packet.FieldData(11, kw.get('TraceNo', None)) # Original trace number
req_packet.FieldData(14, '2912') # Card expiry date
req_packet.FieldData(22, '051') # POS entry mode
req_packet.FieldData(23, kw.get('CardOrder', '000')) # Card sequence number
req_packet.FieldData(25, '81') # POS condition code
req_packet.FieldData(35, kw.get('Track2', '6212142000000000012=29122206899031006')) # Track 2 data
req_packet.FieldData(36, kw.get('Track3', None)) # Track 3 data
req_packet.FieldData(38, kw.get('AuthNo', None)) # Original authorization code
req_packet.FieldData(39, '96') # Reversal reason
req_packet.FieldData(41, kw.get('TerminalNo', '52010009')) # Terminal ID
req_packet.FieldData(42, kw.get('MerchantNo', '898520154110004')) # Merchant ID
req_packet.FieldData(48, 'KP77SG0C26323520140909356184 70000000201809#') # Industry-specific information
req_packet.FieldData(49, '156') # Transaction currency code
req_packet.FieldData(55, kw.get('ICData', None)) # IC card data field
req_packet.FieldData(60, kw.get('Field60', None))
req_packet.FieldData(61, kw.get('Field61', None)) # Original transaction information
req_packet.FieldData(64, '0000000000000000')
req_packet.FieldData(64, calc_mac_cbc(MAB=req_packet.BuildIso()[11:-8])) # Message authentication code
print("冲正交易: ")
req_packet.PrintMessage()
data = req_packet.BuildIso()
data = struct.pack('!H', len(data)) + data
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
py8583.MemDump("Sending: ", data)
sock.send(data)
data = sock.recv(4096)
py8583.MemDump('Received: ', data)
sock.close()
parse_package(data)
def profession_query(**kw):
""" 行业信息查询 """
now = datetime.now()
req_packet = py8583.Iso8583(IsoSpec=py8583spec.IsoSpec1987BCD())
req_packet.MTI('0100')
req_packet.TPDU('6005810000')
req_packet.HEADER('603100000000')
req_packet.FieldData(3, '310000') # Processing code
req_packet.FieldData(11, now.strftime('%H%M%S')) # Terminal trace number
req_packet.FieldData(25, '87') # POS condition code
req_packet.FieldData(41, kw.get('TerminalNo', '52010009')) # Terminal ID
req_packet.FieldData(42, kw.get('MerchantNo', '898520154110004')) # Merchant ID
req_packet.FieldData(48, kw.get('OrgCode', None)) # Industry-specific information
req_packet.FieldData(64, '0000000000000000')
req_packet.FieldData(64, calc_mac_cbc(MAB=req_packet.BuildIso()[11:-8])) # Message authentication code
print("行业信息查询: ")
req_packet.PrintMessage()
data = req_packet.BuildIso()
data = struct.pack('!H', len(data)) + data
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
py8583.MemDump("Sending: ", data)
sock.send(data)
data = sock.recv(4096)
py8583.MemDump('Received: ', data)
sock.close()
parse_package(data)
if __name__ == '__main__':
#terminal_checkin(TerminalNo='52010009')
#balance_query(PinBlock=calc_pinblock(), ICData='9F2608BD23789651C50E119F2701809F101307010103A0A804010A010000045796F2D315039F3704A1DD65379F36020FFE950580800460009A031811139C01309F02060000000000005F2A02015682027C009F1A0201569F03060000000000009F3303604800')
#balance_payment(PinBlock=calc_pinblock(), ICData='9F2608BD23789651C50E119F2701809F101307010103A0A804010A010000045796F2D315039F3704A1DD65379F36020FFE950580800460009A031811139C01309F02060000000000005F2A02015682027C009F1A0201569F03060000000000009F3303604800')
#payment_reversal(amount='1', TraceNo='160310', AuthNo=None, Field60='2218111400000060', Field61='1811141603101114000000000000002000')
payment_revoke(amount='3', ReferNo='204304908107', AuthNo=None, Field61='1811142043021114000000000000002000')
| 43.775974
| 262
| 0.663725
| 1,648
| 13,483
| 5.305825
| 0.148058
| 0.139982
| 0.195563
| 0.022873
| 0.82651
| 0.756176
| 0.751258
| 0.744625
| 0.735247
| 0.728156
| 0
| 0.176635
| 0.176593
| 13,483
| 307
| 263
| 43.918567
| 0.610971
| 0.099681
| 0
| 0.715909
| 0
| 0
| 0.161713
| 0.036488
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037879
| false
| 0.007576
| 0.034091
| 0
| 0.087121
| 0.026515
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
eeb0995e19d16813f87fd2928bf2ce1a80d9e1f9
| 239
|
py
|
Python
|
nmigen_boards/upduino_v1.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 11
|
2021-12-10T12:23:29.000Z
|
2022-03-13T08:40:20.000Z
|
nmigen_boards/upduino_v1.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 12
|
2021-12-11T18:51:29.000Z
|
2022-03-12T05:08:52.000Z
|
nmigen_boards/upduino_v1.py
|
lethalbit/nmigen-boards
|
aaf18252e457ff95257137da2a629820c0ff2bfa
|
[
"BSD-2-Clause"
] | 7
|
2021-12-12T07:20:21.000Z
|
2022-03-06T06:20:55.000Z
|
from amaranth_boards.upduino_v1 import *
from amaranth_boards.upduino_v1 import __all__
import warnings
warnings.warn("instead of nmigen_boards.upduino_v1, use amaranth_boards.upduino_v1",
DeprecationWarning, stacklevel=2)
| 29.875
| 84
| 0.803347
| 31
| 239
| 5.806452
| 0.516129
| 0.288889
| 0.333333
| 0.383333
| 0.366667
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0.024272
| 0.138075
| 239
| 7
| 85
| 34.142857
| 0.849515
| 0
| 0
| 0
| 0
| 0
| 0.280335
| 0.213389
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
eed0ac27fc64a6daaaa9d885c511c65b5b957379
| 121
|
py
|
Python
|
tests/test_dummy.py
|
birgirst/python-flask-docker-hello-world
|
eb9dd186400a503cb6719fb4c8650c896f5f2d46
|
[
"Apache-2.0"
] | null | null | null |
tests/test_dummy.py
|
birgirst/python-flask-docker-hello-world
|
eb9dd186400a503cb6719fb4c8650c896f5f2d46
|
[
"Apache-2.0"
] | null | null | null |
tests/test_dummy.py
|
birgirst/python-flask-docker-hello-world
|
eb9dd186400a503cb6719fb4c8650c896f5f2d46
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import app
def test_dummy():
assert app.hello_world() == "Hello World!"
| 13.444444
| 46
| 0.636364
| 18
| 121
| 4.166667
| 0.833333
| 0.266667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 0.181818
| 121
| 8
| 47
| 15.125
| 0.747475
| 0.31405
| 0
| 0
| 0
| 0
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
eef41ab38e765e048908a7c0cd50df081a1517f9
| 15,452
|
py
|
Python
|
test/test_skill_check.py
|
velian/DSAbot
|
7491c90ca3ecd30ea023bb039e31c6053f2bdfef
|
[
"MIT"
] | 1
|
2021-05-10T12:45:01.000Z
|
2021-05-10T12:45:01.000Z
|
test/test_skill_check.py
|
velian/DSAbot
|
7491c90ca3ecd30ea023bb039e31c6053f2bdfef
|
[
"MIT"
] | 46
|
2020-08-13T14:52:53.000Z
|
2021-06-08T15:26:40.000Z
|
test/test_skill_check.py
|
Fidge123/DSAbot
|
7491c90ca3ecd30ea023bb039e31c6053f2bdfef
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import MagicMock, patch
from bot.checks import SkillCheck
from test.mocks import MockAuthor
class TestSkillCheck(TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.author = MockAuthor("TestUser")
def test_parse(self):
self.assertIsNotNone(SkillCheck(self.author, "13 14 15@2"))
self.assertIsNotNone(SkillCheck(self.author, "1,12,18@18"))
self.assertIsNotNone(SkillCheck(self.author, "8 19 1400@0 + 14"))
self.assertIsNotNone(SkillCheck(self.author, "2 2,2, @1400-2-2-2"))
self.assertIsNotNone(SkillCheck(self.author, "!13 1 12@2 +1+1 Test"))
self.assertIsNotNone(SkillCheck(self.author, "! 1,12,18@18 Krit"))
self.assertIsNotNone(SkillCheck(self.author, "14 14 14@5+2FP Spezialisierung"))
self.assertIsNotNone(SkillCheck(self.author, "14 14 14 @ 5 -2 +3 -5FP +3FP"))
with self.assertRaises(ValueError):
SkillCheck(self.author, "!!13 1@2")
with self.assertRaises(ValueError):
SkillCheck(self.author, "! 1 13@0")
with self.assertRaises(ValueError):
SkillCheck(self.author, "!?4")
with self.assertRaises(ValueError):
SkillCheck(self.author, "#2,2,2@2")
def test_parse_with_other_commands(self):
with self.assertRaises(ValueError):
SkillCheck(self.author, "d3")
with self.assertRaises(ValueError):
SkillCheck(self.author, "note:foobar")
with self.assertRaises(ValueError):
SkillCheck(self.author, "SUMMON")
with self.assertRaises(ValueError):
SkillCheck(self.author, "BEGONE")
with self.assertRaises(ValueError):
SkillCheck(self.author, "DIE")
with self.assertRaises(ValueError):
SkillCheck(self.author, "13,13,13+1")
with self.assertRaises(ValueError):
SkillCheck(self.author, "13")
@patch("random.randint", new_callable=MagicMock())
def test_quality_level(self, mock_randint: MagicMock):
mock_randint.return_value = 2
sc = SkillCheck(self.author, "11,9,9@0")
self.assertEqual(sc.skill_points, 0)
self.assertEqual(sc.ql(sc.skill_points), 1)
sc = SkillCheck(self.author, "11,9,9@1")
self.assertEqual(sc.skill_points, 1)
self.assertEqual(sc.ql(sc.skill_points), 1)
sc = SkillCheck(self.author, "11,9,9@2")
self.assertEqual(sc.skill_points, 2)
self.assertEqual(sc.ql(sc.skill_points), 1)
sc = SkillCheck(self.author, "11,9,9@3")
self.assertEqual(sc.skill_points, 3)
self.assertEqual(sc.ql(sc.skill_points), 1)
sc = SkillCheck(self.author, "11,9,9@4")
self.assertEqual(sc.skill_points, 4)
self.assertEqual(sc.ql(sc.skill_points), 2)
sc = SkillCheck(self.author, "11,9,9@5")
self.assertEqual(sc.skill_points, 5)
self.assertEqual(sc.ql(sc.skill_points), 2)
sc = SkillCheck(self.author, "11,9,9@6")
self.assertEqual(sc.skill_points, 6)
self.assertEqual(sc.ql(sc.skill_points), 2)
sc = SkillCheck(self.author, "11,9,9@7")
self.assertEqual(sc.skill_points, 7)
self.assertEqual(sc.ql(sc.skill_points), 3)
sc = SkillCheck(self.author, "11,9,9@8")
self.assertEqual(sc.skill_points, 8)
self.assertEqual(sc.ql(sc.skill_points), 3)
sc = SkillCheck(self.author, "11,9,9@9")
self.assertEqual(sc.skill_points, 9)
self.assertEqual(sc.ql(sc.skill_points), 3)
sc = SkillCheck(self.author, "11,9,9@10")
self.assertEqual(sc.skill_points, 10)
self.assertEqual(sc.ql(sc.skill_points), 4)
sc = SkillCheck(self.author, "11,9,9@16")
self.assertEqual(sc.skill_points, 16)
self.assertEqual(sc.ql(sc.skill_points), 6)
sc = SkillCheck(self.author, "11,9,9@26")
self.assertEqual(sc.skill_points, 26)
self.assertEqual(sc.ql(sc.skill_points), 6)
@patch("random.randint", new_callable=MagicMock())
def test_end2end(self, mock_randint: MagicMock):
mock_randint.return_value = 9
sc = SkillCheck(self.author, "11,9,9@4")
self.assertEqual(sc.data["attributes"], [11, 9, 9])
self.assertEqual(sc.data["EAV"], [11, 9, 9])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 4)
self.assertEqual(sc.data["modifier"], 0)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "")
self.assertEqual(sc.data["rolls"].rolls, [9, 9, 9])
self.assertEqual(sc.data["rolls"].critical_success, False)
self.assertEqual(sc.data["rolls"].botch, False)
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [0, 0, 0])
self.assertEqual(sc.skill_points, 4)
self.assertEqual(
str(sc),
" \n"
"```py\n"
"EEW: 11 9 9\n"
"Würfel: 9 9 9\n"
"FW 4 = 4 FP\n"
"Bestanden mit QS 2\n"
"```",
)
sc = SkillCheck(self.author, "!13 14 15@6-2 Sinnesschärfe")
self.assertEqual(sc.data["attributes"], [13, 14, 15])
self.assertEqual(sc.data["EAV"], [11, 12, 13])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 6)
self.assertEqual(sc.data["modifier"], -2)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "Sinnesschärfe")
self.assertEqual(sc.data["rolls"].rolls, [9, 9, 9])
self.assertEqual(sc.data["rolls"].critical_success, False)
self.assertEqual(sc.data["rolls"].botch, False)
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [0, 0, 0])
self.assertEqual(sc.skill_points, 6)
self.assertEqual(
str(sc),
" Sinnesschärfe\n"
"```py\n"
"EEW: 11 12 13\n"
"Würfel: 9 9 9\n"
"FW 6 = 6 FP\n"
"Bestanden mit QS 2\n"
"```",
)
sc = SkillCheck(self.author, "!5 3, 4,@16 +1+1 -2- 2 🎉-1")
self.assertEqual(sc.data["attributes"], [5, 3, 4])
self.assertEqual(sc.data["EAV"], [3, 1, 2])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 16)
self.assertEqual(sc.data["modifier"], -2)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "🎉-1")
self.assertEqual(sc.data["rolls"].rolls, [9, 9, 9])
self.assertEqual(sc.data["rolls"].critical_success, False)
self.assertEqual(sc.data["rolls"].botch, False)
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [-6, -8, -7])
self.assertEqual(sc.skill_points, -5)
self.assertEqual(
str(sc),
" 🎉-1\n"
"```py\n"
"EEW: 3 1 2\n"
"Würfel: 9 9 9\n"
"FW 16 -6 -8 -7 = -5 FP\n"
"Nicht bestanden\n"
"```",
)
sc = SkillCheck(self.author, "14 14 14@5-5FP Spezialisierung")
self.assertEqual(sc.data["attributes"], [14, 14, 14])
self.assertEqual(sc.data["EAV"], [14, 14, 14])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 5)
self.assertEqual(sc.data["modifier"], 0)
self.assertEqual(sc.data["modifierFP"], -5)
self.assertEqual(sc.data["comment"], "Spezialisierung")
self.assertEqual(sc.data["rolls"].rolls, [9, 9, 9])
self.assertEqual(sc.data["rolls"].critical_success, False)
self.assertEqual(sc.data["rolls"].botch, False)
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [0, 0, 0])
self.assertEqual(sc.skill_points, 0)
self.assertEqual(
str(sc),
" Spezialisierung\n"
"```py\n"
"EEW: 14 14 14\n"
"Würfel: 9 9 9\n"
"FW 5 -5 = 0 FP\n"
"Bestanden mit QS 1\n"
"```",
)
sc = SkillCheck(self.author, "7 5 6 @ 16 -1 -3 -1FP +6FP +3FP test")
self.assertEqual(sc.data["attributes"], [7, 5, 6])
self.assertEqual(sc.data["EAV"], [3, 1, 2])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 16)
self.assertEqual(sc.data["modifier"], -4)
self.assertEqual(sc.data["modifierFP"], 8)
self.assertEqual(sc.data["comment"], "test")
self.assertEqual(sc.data["rolls"].rolls, [9, 9, 9])
self.assertEqual(sc.data["rolls"].critical_success, False)
self.assertEqual(sc.data["rolls"].botch, False)
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [-6, -8, -7])
self.assertEqual(sc.skill_points, 3)
self.assertEqual(
str(sc),
" test\n"
"```py\n"
"EEW: 3 1 2\n"
"Würfel: 9 9 9\n"
"FW 16+8 -6 -8 -7 = 3 FP\n"
"Bestanden mit QS 1\n"
"```",
)
@patch("random.randint", new_callable=MagicMock())
def test_end2end_crit_botch(self, mock_randint: MagicMock):
mock_randint.return_value = 1
sc = SkillCheck(self.author, "2,3,4@4")
self.assertEqual(sc.data["attributes"], [2, 3, 4])
self.assertEqual(sc.data["EAV"], [2, 3, 4])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 4)
self.assertEqual(sc.data["modifier"], 0)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "")
self.assertEqual(sc.data["rolls"].rolls, [1, 1, 1])
self.assertEqual(sc.data["rolls"].critical_success, True)
self.assertEqual(sc.data["rolls"].botch, False)
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [0, 0, 0])
self.assertEqual(sc.skill_points, 4)
self.assertEqual(
str(sc),
" \n"
"```py\n"
"EEW: 2 3 4\n"
"Würfel: 1 1 1\n"
"FW 4 = 4 FP\n"
"Kritischer Erfolg! (QS 2)\n"
"```",
)
mock_randint.return_value = 20
sc = SkillCheck(self.author, "14 18 18@3 + 2")
self.assertEqual(sc.data["attributes"], [14, 18, 18])
self.assertEqual(sc.data["EAV"], [16, 20, 20])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 3)
self.assertEqual(sc.data["modifier"], 2)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "")
self.assertEqual(sc.data["rolls"].rolls, [20, 20, 20])
self.assertEqual(sc.data["rolls"].critical_success, False)
self.assertEqual(sc.data["rolls"].botch, True)
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [-4, 0, 0])
self.assertEqual(sc.skill_points, -1)
self.assertEqual(
str(sc),
" \n"
"```py\n"
"EEW: 16 20 20\n"
"Würfel: 20 20 20\n"
"FW 3 -4 = -1 FP\n"
"Patzer!\n"
"```",
)
mock_randint.return_value = 20
sc = SkillCheck(self.author, "18,18 18@3 + 2")
self.assertEqual(sc.data["attributes"], [18, 18, 18])
self.assertEqual(sc.data["EAV"], [20, 20, 20])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 3)
self.assertEqual(sc.data["modifier"], 2)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "")
self.assertEqual(sc.data["rolls"].rolls, [20, 20, 20])
self.assertEqual(sc.data["rolls"].critical_success, False)
self.assertEqual(sc.data["rolls"].botch, True)
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [0, 0, 0])
self.assertEqual(sc.skill_points, 3)
self.assertEqual(
str(sc),
" \n"
"```py\n"
"EEW: 20 20 20\n"
"Würfel: 20 20 20\n"
"FW 3 = 3 FP\n"
"Patzer! - Automatisch nicht bestanden\n"
"```",
)
@patch("random.randint", new_callable=MagicMock())
def test_end2end_routine_impossible(self, mock_randint: MagicMock):
mock_randint.return_value = 9
sc = SkillCheck(self.author, "14, 14, 14 @ 7 + 1")
self.assertEqual(sc.data["attributes"], [14, 14, 14])
self.assertEqual(sc.data["EAV"], [15, 15, 15])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 7)
self.assertEqual(sc.data["modifier"], 1)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "")
self.assertEqual(sc.routine, True)
self.assertEqual(sc.impossible(), False)
self.assertEqual(
str(sc),
" \n```py\n" "Routineprobe: 4 FP = QS 2\n```",
)
sc = SkillCheck(self.author, "!force 13 14 15 @ 10 Sinnesschärfe")
self.assertEqual(sc.data["attributes"], [13, 14, 15])
self.assertEqual(sc.data["EAV"], [13, 14, 15])
self.assertEqual(sc.data["force"], True)
self.assertEqual(sc.data["SR"], 10)
self.assertEqual(sc.data["modifier"], 0)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "Sinnesschärfe")
self.assertEqual(sc.data["rolls"].rolls, [9, 9, 9])
self.assertEqual(sc.data["rolls"].critical_success, False)
self.assertEqual(sc.data["rolls"].botch, False)
self.assertEqual(sc.routine, True)
self.assertEqual(sc.impossible(), False)
self.assertEqual(sc.diffs, [0, 0, 0])
self.assertEqual(sc.skill_points, 10)
self.assertEqual(
str(sc),
" Sinnesschärfe\n"
"```py\n"
"EEW: 13 14 15\n"
"Würfel: 9 9 9\n"
"FW 10 = 10 FP\n"
"Bestanden mit QS 4\n"
"```",
)
sc = SkillCheck(self.author, "2,3,4@4-2")
self.assertEqual(sc.data["attributes"], [2, 3, 4])
self.assertEqual(sc.data["EAV"], [0, 1, 2])
self.assertEqual(sc.data["force"], False)
self.assertEqual(sc.data["SR"], 4)
self.assertEqual(sc.data["modifier"], -2)
self.assertEqual(sc.data["modifierFP"], 0)
self.assertEqual(sc.data["comment"], "")
self.assertEqual(sc.routine, False)
self.assertEqual(sc.impossible(), True)
self.assertEqual(
str(sc),
" \n```py\nEEW: 0 1 2\nProbe nicht möglich\n```",
)
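# The quality-level assertions above imply the mapping
# QL = clamp(ceil(FP / 3), 1, 6), with 0 FP still passing at QL 1. A
# standalone sketch of that inferred rule (an inference from the test data,
# not SkillCheck's actual implementation):
import math

def inferred_ql(skill_points):
    # One quality level per 3 skill points, floored at 1 and capped at 6.
    return max(1, min(6, math.ceil(skill_points / 3)))

assert [inferred_ql(p) for p in (0, 3, 4, 7, 10, 16, 26)] == [1, 1, 2, 3, 4, 6, 6]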
| 40.986737
| 87
| 0.561416
| 1,935
| 15,452
| 4.444444
| 0.060465
| 0.315698
| 0.336047
| 0.253953
| 0.918953
| 0.866279
| 0.835349
| 0.742791
| 0.662209
| 0.607791
| 0
| 0.054641
| 0.27634
| 15,452
| 376
| 88
| 41.095745
| 0.714184
| 0
| 0
| 0.612717
| 0
| 0
| 0.15519
| 0
| 0
| 0
| 0
| 0
| 0.578035
| 1
| 0.020231
| false
| 0
| 0.011561
| 0
| 0.034682
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e11335a97e481526cf67b8552071145cc88bb770
| 147
|
py
|
Python
|
src/service1/modules/ping_responder.py
|
tsaqib/python-monorepo
|
77e65af4478082b985bd9018980c42f8d099ee45
|
[
"MIT"
] | 2
|
2021-04-25T20:18:06.000Z
|
2021-09-29T20:38:29.000Z
|
src/service1/modules/ping_responder.py
|
tsaqib/python-monorepo
|
77e65af4478082b985bd9018980c42f8d099ee45
|
[
"MIT"
] | null | null | null |
src/service1/modules/ping_responder.py
|
tsaqib/python-monorepo
|
77e65af4478082b985bd9018980c42f8d099ee45
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from common.utils.formatter import fmt_message
def ping():
return fmt_message("Service1", str(datetime.now()))
| 21
| 55
| 0.768707
| 20
| 147
| 5.55
| 0.7
| 0.18018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.129252
| 147
| 6
| 56
| 24.5
| 0.859375
| 0
| 0
| 0
| 0
| 0
| 0.054422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
e12672b9b37a5ad0358bc6dfea8b13d4843da4b7
| 12,215
|
py
|
Python
|
LEDLetterValues.py
|
PHSCRC/phsled
|
e2e0c2a26de3b6c9b6c1be6eea5b21d7f5315b86
|
[
"MIT"
] | 1
|
2015-01-06T18:49:08.000Z
|
2015-01-06T18:49:08.000Z
|
LEDLetterValues.py
|
PHSCRC/phsled
|
e2e0c2a26de3b6c9b6c1be6eea5b21d7f5315b86
|
[
"MIT"
] | null | null | null |
LEDLetterValues.py
|
PHSCRC/phsled
|
e2e0c2a26de3b6c9b6c1be6eea5b21d7f5315b86
|
[
"MIT"
] | null | null | null |
### TODO: Add numbers, lowercase letters, and punctuation
toLED = {'A' :[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1]
],
'B' : [
[1,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,0]
],
'C' :[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
] ,
'D' :[
[1,1,1,0,0],
[1,0,0,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,1,0],
[1,1,1,0,0]
],
'E' :[
[1,1,1,1,1],
[1,0,0,0,0],
[1,0,0,0,0],
[1,1,1,1,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,1,1,1,1]
],
'F' :[
[1,1,1,1,1],
[1,0,0,0,0],
[1,0,0,0,0],
[1,1,1,1,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0]
],
'G' : [
[0,1,1,1,0],
[0,1,0,0,1],
[1,0,0,0,1],
[1,0,0,0,0],
[1,0,1,1,1],
[1,0,0,0,1],
[0,0,0,0,1],
[0,1,1,1,0]
],
'H' : [
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1]
],
'I' : [
[1,1,1,1,1],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[1,1,1,1,1]
],
'J' : [
[1,1,1,1,1],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[1,0,1,0,0],
[1,0,1,0,0],
[0,1,0,0,0]
],
'K' : [
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,1,0],
[1,0,1,0,0],
[1,1,0,0,0],
[1,0,1,0,0],
[1,0,0,1,0],
[1,0,0,0,1]
],
'L' : [
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,1,1,1,1]
],
'M' : [
[1,0,0,0,1],
[1,1,0,1,1],
[1,0,1,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1]
],
'N' : [
[1,0,0,0,1],
[1,1,0,0,1],
[1,0,1,0,1],
[1,0,1,0,1],
[1,0,1,0,1],
[1,0,1,0,1],
[1,0,0,1,1],
[1,0,0,0,1]
],
'O' : [
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
],
'P' : [
[1,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0]
],
'Q' : [
[0,1,1,0,0],
[1,0,0,1,0],
[1,0,0,1,0],
[1,0,0,1,0],
[1,0,0,1,0],
[1,0,1,1,0],
[1,0,1,1,0],
[1,1,0,0,1]
],
'R' : [
[1,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,0],
[1,1,0,0,0],
[1,0,1,0,0],
[1,0,0,1,0],
[1,0,0,0,1]
],
'S' : [
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,0],
[0,1,1,1,0],
[0,0,0,0,1],
[0,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
],
'T' :[
[1,1,1,1,1],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0]
],
'U' :[
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
],
'V':[
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,0,1,0],
[0,1,0,1,0],
[0,1,0,1,0],
[0,0,1,0,0]
],
'W':[
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,1,0,1],
[1,1,0,1,1],
[1,0,0,0,1]
],
'X':[
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,0,1,0],
[0,1,0,1,0],
[0,1,1,1,0],
[0,1,0,1,0],
[1,0,0,0,1],
[1,0,0,0,1]
],
'Y':[
[1,0,0,0,1],
[0,1,0,1,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0]
],
'Z':[
[1,1,1,1,1],
[0,0,0,1,0],
[0,0,0,1,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,1,0,0,0],
[0,1,0,0,0],
[1,1,1,1,1]
],
' ':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0]
],
'.':[
[0],
[0],
[0],
[0],
[0],
[0],
[0],
[1]
],
'!':[
[1],
[1],
[1],
[1],
[1],
[0],
[1],
[1]
],
',':[
[0],
[0],
[0],
[0],
[0],
[0],
[1],
[1]
],
'\"':[
[1,0,1],
[1,0,1],
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0]
],
'\'':[
[1],
[1],
[0],
[0],
[0],
[0],
[0],
[0]
],
';':[
[0,0],
[0,1],
[0,1],
[0,0],
[0,1],
[0,1],
[1,0],
[0,0]
],
':':[
[0],
[0],
[1],
[1],
[0],
[1],
[1],
[0]
],
'?':[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[0,0,0,1,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,0,0,0],
[0,0,1,0,0]
],
'#':[
[0,1,0,1,0],
[0,1,0,1,0],
[1,1,1,1,1],
[0,1,0,1,0],
[0,1,0,1,0],
[1,1,1,1,1],
[0,1,0,0,0],
[0,1,0,1,0]
],
'-':[
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0],
[1,1,1],
[0,0,0],
[0,0,0],
[0,0,0]
],
'_':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[1,1,1,1,1]
],
'+':[
[0,0,0],
[0,0,0],
[0,0,0],
[0,1,0],
[1,1,1],
[0,1,0],
[0,0,0],
[0,0,0]
],
'=':[
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[1,1,1,1],
[0,0,0,0],
[1,1,1,1],
[0,0,0,0],
[0,0,0,0]
],
'@':[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,1,0,1],
[1,1,0,1,1],
[1,0,1,1,1],
[1,0,0,0,1],
[0,1,1,1,1]
],
'$':[
[0,1,1,1,0],
[1,0,1,0,1],
[1,0,1,0,0],
[0,1,1,1,0],
[0,0,1,0,1],
[0,0,1,0,1],
[1,0,1,0,1],
[0,1,1,1,0]
],
'%':[
[1,1,0,0],
[1,1,0,0],
[0,0,0,1],
[0,0,1,0],
[0,1,0,0],
[1,0,0,0],
[0,0,1,1],
[0,0,1,1]
],
'^':[
[0,1,0],
[1,0,1],
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0]
],
'&':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,1,1,1,0],
[1,0,0,0,1],
[0,1,1,0,0],
[0,1,1,0,1],
[1,0,0,1,0],
[0,1,1,0,1]
],
'*':[
[1,1,1],
[1,1,1],
[1,1,1],
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,0]
],
'(':[
[0,1],
[1,0],
[1,0],
[1,0],
[1,0],
[1,0],
[1,0],
[0,1]
],
')':[
[1,0],
[0,1],
[0,1],
[0,1],
[0,1],
[0,1],
[0,1],
[1,0]
],
'[':[
[1,1],
[1,0],
[1,0],
[1,0],
[1,0],
[1,0],
[1,0],
[1,1]
],
']':[
[1,1],
[0,1],
[0,1],
[0,1],
[0,1],
[0,1],
[0,1],
[1,1]
],
'}':[
[1,0,0],
[0,1,0],
[0,1,0],
[0,0,1],
[0,1,0],
[0,1,0],
[0,1,0],
[1,0,0]
],
'{':[
[0,0,1],
[0,1,0],
[0,1,0],
[1,0,0],
[0,1,0],
[0,1,0],
[0,1,0],
[0,0,1]
],
'|':[
[1],
[1],
[1],
[1],
[1],
[1],
[1],
[1]
],
'\\':[
[1,0,0,0],
[1,0,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,1,0],
[0,0,0,1],
[0,0,0,1]
],
'/':[
[0,0,0,1],
[0,0,0,1],
[0,0,1,0],
[0,0,1,0],
[0,1,0,0],
[0,1,0,0],
[1,0,0,0],
[1,0,0,0]
],
'<':[
[0,0,0],
[0,0,0],
[0,0,0],
[0,0,1],
[0,1,0],
[1,0,0],
[0,1,0],
[0,0,1]
],
'>':[
[0,0,0],
[0,0,0],
[0,0,0],
[1,0,0],
[0,1,0],
[0,0,1],
[0,1,0],
[1,0,0]
],
'`':[
[1,0],
[0,1],
[0,0],
[0,0],
[0,0],
[0,0],
[0,0],
[0,0]
],
'~':[
[0,0,0,0],
[0,0,0,0],
[0,1,0,1],
[1,0,1,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0]
],
'a':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[1,1,1,1,0],
[0,0,0,0,1],
[0,1,1,0,1],
[1,0,0,1,1],
[0,1,1,0,1]
],
'b':[
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,1,1,0],
[1,1,0,0,1],
[1,1,0,0,1],
[1,0,1,1,0]
],
'c':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,0],
[1,0,0,0,1],
[0,1,1,1,0]
],
'd':[
[0,0,0,0,1],
[0,0,0,0,1],
[0,0,0,0,1],
[0,0,0,0,1],
[0,1,1,0,1],
[1,0,0,1,1],
[1,0,0,1,1],
[0,1,1,0,1]
],
'e':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,1,1,1,0],
[1,0,0,0,1],
[1,1,1,1,1],
[1,0,0,0,0],
[0,1,1,1,1]
],
'f':[
[0,1,1,0],
[1,0,0,1],
[1,0,0,0],
[1,0,0,0],
[1,1,0,0],
[1,0,0,0],
[1,0,0,0],
[1,0,0,0]
],
'g':[
[0,0,0,0],
[0,0,0,0],
[0,1,1,0],
[1,0,0,1],
[0,1,1,1],
[0,0,0,1],
[1,0,0,1],
[0,1,1,0]
],
'h':[
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,0,0,0,0],
[1,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1]
],
'i':[
[1],
[1],
[0],
[1],
[1],
[1],
[1],
[1]
],
'j':[
[0,0,0,1],
[0,0,0,1],
[0,0,0,0],
[0,0,0,1],
[0,0,0,1],
[0,0,0,1],
[1,0,0,1],
[0,1,1,0]
],
'k':[
[1,0,0],
[1,0,0],
[1,0,0],
[1,0,0],
[1,0,0],
[1,0,1],
[1,1,0],
[1,0,1]
],
'l':[
[1,1,0],
[0,1,0],
[0,1,0],
[0,1,0],
[0,1,0],
[0,1,0],
[0,1,0],
[0,0,1]
],
'm':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[1,1,0,1,0],
[1,0,1,0,1],
[1,0,1,0,1],
[1,0,1,0,1],
[1,0,1,0,1]
],
'n':[
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[1,1,1,0],
[1,0,0,1],
[1,0,0,1],
[1,0,0,1],
[1,0,0,1]
],
'o':[
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,1,1,0],
[1,0,0,1],
[1,0,0,1],
[1,0,0,1],
[0,1,1,0]
],
'p':[
[0,0,0,0],
[0,0,0,0],
[0,1,1,0],
[1,0,0,1],
[1,0,0,1],
[1,1,1,0],
[1,0,0,0],
[1,0,0,0]
],
'q':[
[0,0,0,0],
[0,0,0,0],
[0,1,1,0],
[1,0,0,1],
[1,0,0,1],
[0,1,1,1],
[0,0,0,1],
[0,0,0,1]
],
'r':[
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[1,1,1,0],
[1,0,0,1],
[1,0,0,0],
[1,0,0,0],
[1,0,0,0]
],
's':[
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[0,1,1,1],
[1,0,0,0],
[0,1,1,0],
[0,0,0,1],
[1,1,1,0]
],
't':[
[0,1,0],
[0,1,0],
[1,1,1],
[0,1,0],
[0,1,0],
[0,1,0],
[0,1,0],
[0,1,0]
],
'u':[
[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[1,0,0,1],
[1,0,0,1],
[1,0,0,1],
[1,0,0,1],
[0,1,1,1]
],
'v':[
[0,0,0],
[0,0,0],
[0,0,0],
[1,0,1],
[1,0,1],
[1,0,1],
[1,0,1],
[0,1,0]
],
'w':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,1,0,1],
[0,1,0,1,0]
],
'x':[
[0,0,0],
[0,0,0],
[0,0,0],
[1,0,1],
[1,0,1],
[0,1,0],
[1,0,1],
[1,0,1]
],
'y':[
[0,0,0,0],
[0,0,0,0],
[1,0,0,1],
[1,0,0,1],
[0,1,1,1],
[0,0,0,1],
[1,0,0,1],
[0,1,1,0]
],
'z':[
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[1,1,1,1,1],
[0,0,0,1,0],
[0,0,1,0,0],
[0,1,0,0,0],
[1,1,1,1,1]
],
'1':[
[1,1,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[1,1,1,1,1]
],
'2':[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[0,0,0,0,1],
[0,0,0,1,0],
[0,0,1,0,0],
[0,1,0,0,0],
[1,1,1,1,1]
],
'3':[
[1,1,1,1,0],
[0,0,0,0,1],
[0,0,0,1,0],
[0,0,1,1,0],
[0,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
],
'4':[
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,1],
[0,0,0,0,1],
[0,0,0,0,1],
[0,0,0,0,1]
],
'5':[
[1,1,1,1,1],
[1,0,0,0,0],
[1,0,0,0,0],
[1,1,1,1,0],
[1,0,0,0,1],
[0,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
],
'6':[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,0],
[1,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
],
'7':[
[1,1,1,1,1],
[0,0,0,0,1],
[0,0,0,1,0],
[0,0,0,1,0],
[0,0,1,0,0],
[0,1,0,0,0],
[0,1,0,0,0],
[1,0,0,0,0]
],
'8':[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
],
'9':[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,1],
[0,0,0,0,1],
[0,0,0,0,1],
[0,0,0,0,1],
[1,1,1,1,0]
],
'0':[
[0,1,1,1,0],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[0,1,1,1,0]
],
'':[
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1]
]
}
def getLetter(char, fallback=None):
"""If a valid custom fallback is not supplied and the
requested character cannot be found, a full block character
will be used instead."""
if fallback and toLED.get(fallback, None):
fallback = toLED[fallback]
else:
fallback = toLED['']
return toLED.get(char, fallback)
def textToArray(text):
phrase = []
for x in text:
phrase.append(getLetter(x))
phrase += toLED[' '] + toLED[' ']
return phrase
def textTo2D(text):
phrase = [[] for i in range(8)]
text += " " + text
for x in text:
for i, v in enumerate(getLetter(x)):
phrase[i] += v
for i in phrase:
i.append(0)
return phrase
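# A minimal rendering sketch (not in the original module): draw a string as
# ASCII art via textTo2D. Note that textTo2D doubles its input
# ("text += ' ' + text"), so the rendered output repeats - presumably for
# scrolling-marquee use.
def renderText(text):
    rows = textTo2D(text)
    return "\n".join("".join('#' if cell else ' ' for cell in row) for row in rows)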
| 12.202797
| 67
| 0.301433
| 3,303
| 12,215
| 1.114441
| 0.023615
| 0.681337
| 0.677262
| 0.556371
| 0.858462
| 0.854659
| 0.854659
| 0.854116
| 0.851671
| 0.845422
| 0
| 0.350473
| 0.264429
| 12,215
| 1,000
| 68
| 12.215
| 0.05921
| 0.015309
| 0
| 0.862903
| 0
| 0
| 0.008664
| 0
| 0
| 0
| 0
| 0.001
| 0
| 1
| 0.003024
| false
| 0
| 0
| 0
| 0.006048
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
017d74976b84b2bbae2188cc36d865b6f5e8de4c
| 2,387
|
py
|
Python
|
workspace/.c9/metadata/workspace/WebCrawler/urls.py
|
mannyhappenings/WebCrawler
|
1451fe8e6dc55346a654665d736a7df2115e0c50
|
[
"MIT"
] | null | null | null |
workspace/.c9/metadata/workspace/WebCrawler/urls.py
|
mannyhappenings/WebCrawler
|
1451fe8e6dc55346a654665d736a7df2115e0c50
|
[
"MIT"
] | null | null | null |
workspace/.c9/metadata/workspace/WebCrawler/urls.py
|
mannyhappenings/WebCrawler
|
1451fe8e6dc55346a654665d736a7df2115e0c50
|
[
"MIT"
] | null | null | null |
{"filter":false,"title":"urls.py","tooltip":"/WebCrawler/urls.py","undoManager":{"mark":13,"position":13,"stack":[[{"group":"doc","deltas":[{"start":{"row":9,"column":43},"end":{"row":10,"column":0},"action":"insert","lines":["",""]},{"start":{"row":10,"column":0},"end":{"row":10,"column":4},"action":"insert","lines":[" "]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":0},"end":{"row":11,"column":0},"action":"insert","lines":[" url(r'^crawl/', include('crawl.urls')),",""]}]}],[{"group":"doc","deltas":[{"start":{"row":11,"column":0},"end":{"row":11,"column":4},"action":"remove","lines":[" "]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":43},"end":{"row":11,"column":0},"action":"remove","lines":["",""]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":11},"end":{"row":10,"column":16},"action":"remove","lines":["crawl"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":24},"end":{"row":10,"column":29},"action":"remove","lines":["crawl"]},{"start":{"row":10,"column":24},"end":{"row":10,"column":25},"action":"insert","lines":["c"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":25},"end":{"row":10,"column":26},"action":"insert","lines":["o"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":26},"end":{"row":10,"column":27},"action":"insert","lines":["l"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":27},"end":{"row":10,"column":28},"action":"insert","lines":["l"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":28},"end":{"row":10,"column":29},"action":"insert","lines":["e"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":29},"end":{"row":10,"column":30},"action":"insert","lines":["c"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":30},"end":{"row":10,"column":31},"action":"insert","lines":["t"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":11},"end":{"row":10,"column":12},"action":"remove","lines":["/"]}]}],[{"group":"doc","deltas":[{"start":{"row":10,"column":10},"end":{"row":10,"column":11},"action":"remove","lines":["^"]}]}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":11,"column":45},"end":{"row":11,"column":45},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1427728613000,"hash":"6bb422361498271fcf1ee57d568b441afcf85c29"}
| 2,387
| 2,387
| 0.56305
| 312
| 2,387
| 4.307692
| 0.208333
| 0.100446
| 0.220982
| 0.197917
| 0.563988
| 0.505208
| 0.44494
| 0.36756
| 0.341518
| 0.293899
| 0
| 0.072873
| 0.005446
| 2,387
| 1
| 2,387
| 2,387
| 0.49326
| 0
| 0
| 0
| 0
| 0
| 0.505025
| 0.026382
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0187b968e7b4a33e0d1b907039c7501112ca73c9
| 258
|
py
|
Python
|
entity/cards/BARL_024H/__init__.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 102
|
2021-10-20T09:06:39.000Z
|
2022-03-28T13:35:11.000Z
|
entity/cards/BARL_024H/__init__.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 98
|
2021-10-19T16:13:27.000Z
|
2022-03-27T13:27:49.000Z
|
entity/cards/BARL_024H/__init__.py
|
x014/lushi_script
|
edab2b88e3f0de8139de2541ab2daa331f777c0e
|
[
"MIT"
] | 55
|
2021-10-19T03:56:50.000Z
|
2022-03-25T08:25:26.000Z
|
# -*- coding: utf-8 -*-
import entity.cards.BARL_024H.LETL_232
import entity.cards.BARL_024H.LETL_233
import entity.cards.BARL_024H.LETL_234
import entity.cards.BARL_024H.LETL_639
import entity.cards.BARL_024H.LETL_638
import entity.cards.BARL_024H.LETL_640
| 32.25
| 38
| 0.829457
| 45
| 258
| 4.488889
| 0.333333
| 0.356436
| 0.504951
| 0.623762
| 0.861386
| 0.861386
| 0
| 0
| 0
| 0
| 0
| 0.153527
| 0.065891
| 258
| 7
| 39
| 36.857143
| 0.684647
| 0.081395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
01984f88683cefd6de08acfbc64f1e8f8fd23756
| 114
|
py
|
Python
|
invariant_point_attention/__init__.py
|
hushuangwei/invariant-point-attention
|
739b6a15a93a795e80be4561c89bdd5499d2c8ff
|
[
"MIT"
] | 100
|
2021-07-17T00:04:48.000Z
|
2022-03-30T06:54:21.000Z
|
invariant_point_attention/__init__.py
|
hushuangwei/invariant-point-attention
|
739b6a15a93a795e80be4561c89bdd5499d2c8ff
|
[
"MIT"
] | 4
|
2021-07-17T11:41:52.000Z
|
2022-01-14T04:10:09.000Z
|
invariant_point_attention/__init__.py
|
hushuangwei/invariant-point-attention
|
739b6a15a93a795e80be4561c89bdd5499d2c8ff
|
[
"MIT"
] | 9
|
2021-07-17T01:03:13.000Z
|
2021-09-29T13:44:13.000Z
|
from invariant_point_attention.invariant_point_attention import InvariantPointAttention, IPABlock, IPATransformer
| 57
| 113
| 0.921053
| 11
| 114
| 9.181818
| 0.727273
| 0.277228
| 0.455446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 114
| 1
| 114
| 114
| 0.935185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0989fcf5e68e88c596401c132dd5f101e6218b82
| 37,654
|
py
|
Python
|
kneaddata/tests/functional_tests.py
|
zwets/kneaddata
|
16fe3f6bde213f0c7fc5132e65ce7e30c3a95e42
|
[
"MIT"
] | 41
|
2020-06-07T20:07:55.000Z
|
2022-03-12T05:57:11.000Z
|
kneaddata/tests/functional_tests.py
|
zwets/kneaddata
|
16fe3f6bde213f0c7fc5132e65ce7e30c3a95e42
|
[
"MIT"
] | 23
|
2020-05-28T20:41:40.000Z
|
2022-03-17T00:52:06.000Z
|
kneaddata/tests/functional_tests.py
|
zwets/kneaddata
|
16fe3f6bde213f0c7fc5132e65ce7e30c3a95e42
|
[
"MIT"
] | 17
|
2020-08-02T01:35:44.000Z
|
2022-01-21T09:45:42.000Z
|
import unittest
import tempfile
import os
from kneaddata import utilities
from kneaddata import config
import cfg
import utils
def skipIfExeNotFound(exe):
if isinstance(exe,str):
exe=[exe]
if all([utilities.find_exe_in_path(requires, bypass_permissions_check=True) for requires in exe]):
return lambda func: func
return unittest.skip("{} is not installed so test is skipped".format(",".join(exe)))
class TestFunctionalKneadData(unittest.TestCase):
"""
Test KneadData workflows
"""
@skipIfExeNotFound(config.trimmomatic_jar)
def test_trimmomatic_only_no_reference_database_single_end(self):
"""
Test running the default flow of trimmomatic on single end input as no
reference database is provided
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,
"--output",tempdir]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
expected_output_files=[basename+cfg.log_extension,
basename+cfg.single_trim_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound(config.trimmomatic_jar)
def test_trimmomatic_only_no_reference_database_paired_end(self):
"""
Test running the default flow of trimmomatic on paired end input as no
reference database is provided
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
expected_output_files=[basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe])
def test_trimmomatic_bowtie2_database_single_end(self):
"""
Test running the default flow of trimmomatic on single end input with
bowtie2 database provided
Test with keeping temp files
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--store-temp-output"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_output_files=[basename+cfg.log_extension,
basename+cfg.single_trim_extension,
filtered_file_basename+cfg.clean_extension,
filtered_file_basename+cfg.contaminated_extension,
filtered_file_basename+cfg.sam_extension,
basename+cfg.final_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe])
def test_trimmomatic_bowtie2_database_paired_end_remove_intermedite_temp_output(self):
"""
Test running the default flow of trimmomatic on paired end input with a
bowtie2 database provided
Test running with remove intermediate temp output files
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,"--no-discordant"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2",True)
expected_non_empty_output_files=[basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1],
basename+cfg.final_extensions_paired[0],
basename+cfg.final_extensions_paired[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_non_empty_output_files, tempdir):
self.assertTrue(expression,message)
# add the expected output files which can be empty
expected_output_files=expected_non_empty_output_files
expected_output_files+=[filtered_file_basename+cfg.paired_contaminated_extension[0],
filtered_file_basename+cfg.paired_contaminated_extension[1]]
# check there are only three files in the output folder
actual_output_files=os.listdir(tempdir)
self.assertEqual(len(actual_output_files), len(expected_output_files))
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe])
def test_trimmomatic_bowtie2_two_databases_paired_end_remove_intermedite_temp_output(self):
"""
Test running the default flow of trimmomatic on paired end input with two
bowtie2 databases provided (both with the same name)
Test running with remove intermediate temp output files
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--reference-db",cfg.bowtie2_db_folder,"--no-discordant"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_non_empty_output_files=[basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1],
basename+cfg.final_extensions_paired[0],
basename+cfg.final_extensions_paired[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_non_empty_output_files, tempdir):
self.assertTrue(expression,message)
# add the expected output files which can be empty
expected_output_files=expected_non_empty_output_files
expected_output_files+=[filtered_file_basename+cfg.paired_contaminated_extension[0],
filtered_file_basename+cfg.paired_contaminated_extension[1]]
# check there are at least the main expected files in the output folder
actual_output_files=os.listdir(tempdir)
self.assertGreater(len(actual_output_files), len(expected_output_files))
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe])
def test_trimmomatic_bowtie2_two_databases_paired_end_serial(self):
"""
Test running the default flow of trimmomatic on paired end input with two
bowtie2 databases provided (both with the same name)
Test running in serial alignment mode
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--reference-db",cfg.bowtie2_db_folder,"--no-discordant","--serial"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_non_empty_output_files=[basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1],
basename+cfg.final_extensions_paired[0],
basename+cfg.final_extensions_paired[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_non_empty_output_files, tempdir):
self.assertTrue(expression,message)
# add the expected output files which can be empty
expected_output_files=expected_non_empty_output_files
expected_output_files+=[filtered_file_basename+cfg.paired_contaminated_extension[0],
filtered_file_basename+cfg.paired_contaminated_extension[1]]
# check there are at least the main expected files in the output folder
actual_output_files=os.listdir(tempdir)
self.assertGreater(len(actual_output_files), len(expected_output_files))
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe])
def test_trimmomatic_bowtie2_paired_end_remove_intermedite_temp_output_discordant(self):
"""
Test running the default flow of trimmomatic on paired end input with one
bowtie2 database provided
Test running with remove intermediate temp output files
Test with discordant alignments
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_pair_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--reference-db",cfg.bowtie2_db_folder]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_non_empty_output_files=[basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1],
basename+cfg.final_extensions_paired[0],
basename+cfg.final_extensions_paired[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_non_empty_output_files, tempdir):
self.assertTrue(expression,message)
# check there are the expected number of files in the output folder
actual_output_files=list(filter(os.path.getsize,[os.path.join(tempdir,file) for file in os.listdir(tempdir)]))
self.assertEqual(len(actual_output_files), len(expected_non_empty_output_files))
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe, config.trf_exe])
def test_trimmomatic_bowtie2_paired_end_remove_intermedite_temp_output_discordant_trf(self):
"""
Test running the default flow of trimmomatic on paired end input with one
bowtie2 database provided
Test running with remove intermediate temp output files
Test with discordant alignments
Test with TRF
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_pair_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--reference-db",cfg.bowtie2_db_folder, "--run-trf"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_non_empty_output_files=[basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1],
basename+cfg.final_extensions_paired[0],
basename+cfg.final_extensions_paired[1],
basename+cfg.paired_repeats_removed_extensions[0],
basename+cfg.paired_repeats_removed_extensions[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_non_empty_output_files, tempdir):
self.assertTrue(expression,message)
# check there are the expected number of files in the output folder
actual_output_files=list(filter(os.path.getsize,[os.path.join(tempdir,file) for file in os.listdir(tempdir)]))
self.assertEqual(len(actual_output_files), len(expected_non_empty_output_files))
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe, config.trf_exe])
def test_trimmomatic_bowtie2_database_and_trf_single_end(self):
"""
Test running the default flow of trimmomatic on single end input with
bowtie2 database provided
Test with keeping temp files
Test with TRF
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--store-temp-output", "--run-trf"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_output_files=[basename+cfg.log_extension,
basename+cfg.single_trim_extension,
filtered_file_basename+cfg.clean_extension,
filtered_file_basename+cfg.contaminated_extension,
filtered_file_basename+cfg.sam_extension,
basename+cfg.final_extension,
basename+cfg.repeats_removed_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe, config.trf_exe])
def test_trimmomatic_bowtie2_database_and_trf_paired_end_remove_intermedite_temp_output(self):
"""
Test running the default flow of trimmomatic on paired end input with a
bowtie2 database provided
Test running with remove intermediate temp output files
Test running trf
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,"--run-trf","--no-discordant"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2",True)
expected_non_empty_output_files=[basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1],
basename+cfg.final_extensions_paired[0],
basename+cfg.final_extensions_paired[1],
basename+cfg.paired_repeats_removed_extensions[0],
basename+cfg.paired_repeats_removed_extensions[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_non_empty_output_files, tempdir):
self.assertTrue(expression,message)
# add the expected output files which can be empty
expected_output_files=expected_non_empty_output_files
expected_output_files+=[filtered_file_basename+cfg.paired_contaminated_extension[0],
filtered_file_basename+cfg.paired_contaminated_extension[1]]
        # check the output folder holds exactly the expected files and nothing more
actual_output_files=os.listdir(tempdir)
self.assertEqual(len(actual_output_files), len(expected_output_files))
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.trf_exe])
def test_trimmomatic_and_trf_no_reference_database_single_end(self):
"""
Test running the default flow of trimmomatic on single end input as no
reference database is provided
Test with also running trf
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,
"--output",tempdir,"--run-trf"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
expected_output_files=[basename+cfg.log_extension,
basename+cfg.single_trim_extension,
basename+cfg.repeats_removed_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.trf_exe])
def test_trimmomatic_and_trf_no_reference_database_paired_end(self):
"""
Test running the default flow of trimmomatic on paired end input as no
reference database is provided
Test with also running trf
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir,"--run-trf"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
expected_output_files=[basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1],
basename+cfg.paired_repeats_removed_extensions[0],
basename+cfg.paired_repeats_removed_extensions[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound(config.bowtie2_exe)
def test_bowtie2_only_single_end(self):
"""
Test on single end input with bowtie2 database provided
Test with keeping temp files
Test bypassing trim step
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--store-temp-output","--bypass-trim"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_output_files=[basename+cfg.log_extension,
filtered_file_basename+cfg.clean_extension,
filtered_file_basename+cfg.contaminated_extension,
filtered_file_basename+cfg.sam_extension,
basename+cfg.final_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound(config.bowtie2_exe)
    def test_bowtie2_only_paired_end_remove_intermediate_temp_output(self):
"""
Test running the default flow of trimmomatic on paired end input with a
bowtie2 database provided
Test running with remove intermediate temp output files
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,"--bypass-trim","--no-discordant"]
utils.run_kneaddata(command)
# get the basename of the input file
        basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2",True)
expected_non_empty_output_files=[basename+cfg.log_extension,
basename+cfg.final_extensions_paired[0],
basename+cfg.final_extensions_paired[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_non_empty_output_files, tempdir):
self.assertTrue(expression,message)
# add the expected output files which can be empty
expected_output_files=expected_non_empty_output_files
expected_output_files+=[filtered_file_basename+cfg.paired_contaminated_extension[0],
filtered_file_basename+cfg.paired_contaminated_extension[1]]
        # check the output folder holds exactly the expected files and nothing more
actual_output_files=os.listdir(tempdir)
self.assertEqual(len(actual_output_files), len(expected_output_files))
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound(config.trf_exe)
def test_trf_only_single_end(self):
"""
Test running trf only on single end input
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,
"--output",tempdir,"--run-trf","--bypass-trim"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
expected_output_files=[basename+cfg.log_extension,
basename+cfg.repeats_removed_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound(config.trf_exe)
def test_trf_only_paired_end(self):
"""
Test running only trf on paired end input
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir,"--run-trf","--bypass-trim"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
expected_output_files=[basename+cfg.log_extension,
basename+cfg.paired_repeats_removed_extensions[0],
basename+cfg.paired_repeats_removed_extensions[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe, config.trf_exe])
def test_trimmomatic_bowtie2_database_and_trf_single_end_gzipped_input(self):
"""
Test running the default flow of trimmomatic on single end input with
bowtie2 database provided
Test with keeping temp files
Test with TRF
Test with gzipped input fastq file
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file_gzipped,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--store-temp-output", "--run-trf"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_output_files=[basename+cfg.log_extension,
basename+cfg.single_trim_extension,
filtered_file_basename+cfg.clean_extension,
filtered_file_basename+cfg.contaminated_extension,
filtered_file_basename+cfg.sam_extension,
basename+cfg.final_extension,
basename+cfg.repeats_removed_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe, config.trf_exe, config.samtools_exe])
def test_trimmomatic_bowtie2_database_and_trf_single_end_bam_input(self):
"""
Test running the default flow of trimmomatic on single end input with
bowtie2 database provided
Test with keeping temp files
Test with TRF
        Test with bam input file
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.file_bam,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--store-temp-output", "--run-trf"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.file_bam)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_output_files=[basename+"_decompressed.fastq",
basename+"_decompressed.sam",
basename+cfg.log_extension,
basename+cfg.single_trim_extension,
filtered_file_basename+cfg.clean_extension,
filtered_file_basename+cfg.contaminated_extension,
filtered_file_basename+cfg.sam_extension,
basename+cfg.final_extension,
basename+cfg.repeats_removed_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe, config.trf_exe])
def test_trimmomatic_bowtie2_database_and_trf_single_end_sam_input(self):
"""
Test running the default flow of trimmomatic on single end input with
bowtie2 database provided
Test with keeping temp files
Test with TRF
        Test with sam input file
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.file_sam,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--store-temp-output", "--run-trf"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.file_sam)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_output_files=[basename+"_decompressed.fastq",
basename+cfg.log_extension,
basename+cfg.single_trim_extension,
filtered_file_basename+cfg.clean_extension,
filtered_file_basename+cfg.contaminated_extension,
filtered_file_basename+cfg.sam_extension,
basename+cfg.final_extension,
basename+cfg.repeats_removed_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.fastqc_exe])
def test_trimmomatic_fastqc_start_no_reference_database_single_end(self):
"""
Test running the default flow of trimmomatic on single end input as no
reference database is provided
Test running fastqc at the beginning of the workflow
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,
"--output",tempdir,"--run-fastqc-start"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
expected_output_files=[os.path.join("fastqc",basename+cfg.fastqc_extensions[0]),
os.path.join("fastqc",basename+cfg.fastqc_extensions[1]),
basename+cfg.log_extension,
basename+cfg.single_trim_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.fastqc_exe])
def test_trimmomatic_fastqc_start_no_reference_database_paired_end(self):
"""
Test running the default flow of trimmomatic on paired end input as no
reference database is provided
Test running fastqc at the beginning of the workflow
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,"--input",cfg.fastq_file,
"--output",tempdir,"--run-fastqc-start"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
expected_output_files=[os.path.join("fastqc",basename+cfg.fastqc_extensions[0]),
os.path.join("fastqc",basename+cfg.fastqc_extensions[1]),
basename+cfg.log_extension,
basename+cfg.paired_trim_extensions[0],
basename+cfg.paired_trim_extensions[1]]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
@skipIfExeNotFound([config.trimmomatic_jar, config.bowtie2_exe, config.fastqc_exe])
def test_trimmomatic_bowtie2_database_fastqc_end_single_end(self):
"""
Test running the default flow of trimmomatic on single end input with
bowtie2 database provided
Test with keeping temp files
Test running fastqc at the end of the workflow
"""
# create a temp directory for output
tempdir = tempfile.mkdtemp(suffix="test_kneaddata_")
# run kneaddata test
command = ["kneaddata","--input",cfg.fastq_file,
"--output",tempdir,"--reference-db",cfg.bowtie2_db_folder,
"--store-temp-output", "--run-fastqc-end"]
utils.run_kneaddata(command)
# get the basename of the input file
basename=utils.file_basename(cfg.fastq_file)
final_basename=utils.file_basename(basename+cfg.final_extension)
filtered_file_basename=utils.get_filtered_file_basename(basename,cfg.bowtie2_db_folder,"bowtie2")
expected_output_files=[os.path.join("fastqc",final_basename+cfg.fastqc_extensions[0]),
os.path.join("fastqc",final_basename+cfg.fastqc_extensions[1]),
basename+cfg.log_extension,
basename+cfg.single_trim_extension,
filtered_file_basename+cfg.clean_extension,
filtered_file_basename+cfg.contaminated_extension,
filtered_file_basename+cfg.sam_extension,
basename+cfg.final_extension]
# check the output files are as expected
for expression, message in utils.check_output(expected_output_files, tempdir):
self.assertTrue(expression,message)
# remove the temp directory
utils.remove_temp_folder(tempdir)
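
# --- Hedged sketch, not part of the source file ---
# The tests above consume utils.check_output(expected_files, tempdir) as a generator
# of (expression, message) pairs, one pair per expected file. A minimal helper with
# that contract could look like the following; the real kneaddata test utilities may
# implement richer checks (file sizes, ordering, etc.).
import os

def _sketch_check_output(expected_files, output_dir):
    for name in expected_files:
        path = os.path.join(output_dir, name)
        # each yielded pair feeds a self.assertTrue(expression, message) call
        yield os.path.isfile(path), "missing expected output file: " + path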
| 46.031785 | 118 | 0.634381 | 4,133 | 37,654 | 5.528914 | 0.036051 | 0.076539 | 0.051639 | 0.025207 | 0.971467 | 0.967835 | 0.965997 | 0.963853 | 0.961796 | 0.958733 | 0 | 0.005393 | 0.290832 | 37,654 | 817 | 119 | 46.088127 | 0.850354 | 0.191879 | 0 | 0.856436 | 0 | 0 | 0.062104 | 0 | 0 | 0 | 0 | 0 | 0.071782 | 1 | 0.056931 | false | 0.012376 | 0.017327 | 0 | 0.081683 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
09c3f62bf84f8abd6d716269e2c6154253cd681a | 1,227 | py | Python | arjuna-samples/arjex/test/pkg/grouping/check_group_info_2.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | ["Apache-2.0"] | null | null | null | arjuna-samples/arjex/test/pkg/grouping/check_group_info_2.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | ["Apache-2.0"] | null | null | null | arjuna-samples/arjex/test/pkg/grouping/check_group_info_2.py | ChandraMouliDisturbs/arjuna | 4965622fbb01a5e5b6459110c413accc5c483424 | ["Apache-2.0"] | null | null | null |
from arjuna import *
@for_test
def fix_non_dd(request):
print(request.group.name)
yield 1
@for_test(drive_with=records(record(1), record(2)))
def fix_dd(request):
print(request.group.name)
yield request.data[0]
@test
def check_default_without_fix(request):
print(request.group.thread_name, request.group.config["app.url"], request.group.config["check"])
@test
def check_delegated_non_dd_fix(request, fix_non_dd):
print(request.group.thread_name, request.group.config["app.url"], request.group.config["check"], fix_non_dd)
@test(drive_with=records(record(7,8), record(9,10)))
def check_delegated_non_dd_fix_dd_test(request, data, fix_non_dd):
print(request.group.thread_name, request.group.config["app.url"], request.group.config["check"], data, fix_non_dd)
@test
def check_delegated_dd_fix(request, fix_non_dd, fix_dd):
print(request.group.thread_name, request.group.config["app.url"], request.group.config["check"], fix_non_dd, fix_dd)
@test(drive_with=records(record(7,8), record(9,10)))
def check_delegated_dd_fix_dd_test(request, data, fix_non_dd, fix_dd):
print(request.group.thread_name, request.group.config["app.url"], request.group.config["check"], data, fix_non_dd, fix_dd)
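
# Hedged reading of the fixtures above (an assumption about Arjuna's semantics, not
# stated in this file): drive_with=records(record(1), record(2)) makes fix_dd
# data-driven, so a test requesting it runs once per record; request.data[0] is the
# first positional value of the active record, so fix_dd yields 1 on the first run
# and 2 on the second. A data-driven test that also requests a data-driven fixture
# would then execute once per (test record, fixture record) combination.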
| 38.34375 | 126 | 0.761206 | 202 | 1,227 | 4.356436 | 0.168317 | 0.231818 | 0.204545 | 0.130682 | 0.879545 | 0.827273 | 0.768182 | 0.688636 | 0.688636 | 0.638636 | 0 | 0.012511 | 0.08802 | 1,227 | 32 | 126 | 38.34375 | 0.773905 | 0 | 0 | 0.291667 | 0 | 0 | 0.04886 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.291667 | false | 0 | 0.041667 | 0 | 0.333333 | 0.291667 | 0 | 0 | 0 | null | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
09d48716c35ce56a4cefc285681818b94df72bef | 18,035 | py | Python | tests/test_alias.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | ["BSD-3-Clause"] | 4 | 2015-09-09T23:05:39.000Z | 2016-10-20T15:24:58.000Z | tests/test_alias.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | ["BSD-3-Clause"] | null | null | null | tests/test_alias.py | cameron/datahog | 815178ae576bc4b4e1994ca9fcdc0c1f854bfccf | ["BSD-3-Clause"] | null | null | null |
# vim: fileencoding=utf8:et:sw=4:ts=8:sts=4
import hashlib
import hmac
import os
import sys
import unittest
import datahog
from datahog import error
import psycopg2
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import base
from pgmock import *
class AliasTests(base.TestCase):
def setUp(self):
super(AliasTests, self).setUp()
datahog.set_context(1, datahog.NODE)
datahog.set_context(2, datahog.ALIAS, {'base_ctx': 1})
def test_set(self):
add_fetch_result([])
add_fetch_result([None])
self.assertEqual(
datahog.alias.set(self.p, 123, 2, 'value'),
True)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with selectquery (base_id) as (
select base_id
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
),
insertquery as (
insert into alias_lookup (hash, ctx, base_id, flags)
select %s, %s, %s, %s
where not exists (select 1 from selectquery)
)
select base_id
from selectquery
""", (h, 2, h, 2, 123, 0)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
insert into alias (base_id, ctx, value, pos, flags)
select %s, %s, %s, coalesce((
select pos + 1
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), 1), %s
where exists (
select 1 from node
where
time_removed is null
and id=%s
and ctx=%s
)
""", (123, 2, 'value', 123, 2, 0, 123, 1)),
ROWCOUNT,
COMMIT,
TPC_COMMIT])
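    # The eventlog asserted above pins down the suite's write protocol: a two-phase
    # commit spanning two connections -- TPC_BEGIN/TPC_PREPARE around the
    # alias_lookup insert, a plain COMMIT on the alias insert, then TPC_COMMIT to
    # finish the prepared transaction.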
def test_set_failure_already_exists(self):
add_fetch_result([(123,)])
self.assertEqual(
datahog.alias.set(self.p, 123, 2, 'value'),
False)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with selectquery (base_id) as (
select base_id
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
),
insertquery as (
insert into alias_lookup (hash, ctx, base_id, flags)
select %s, %s, %s, %s
where not exists (select 1 from selectquery)
)
select base_id
from selectquery
""", (h, 2, h, 2, 123, 0)),
ROWCOUNT,
FETCH_ONE,
TPC_ROLLBACK])
def test_set_failure_claimed(self):
add_fetch_result([(124,)])
self.assertRaises(error.AliasInUse,
datahog.alias.set, self.p, 123, 2, 'value')
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
with selectquery (base_id) as (
select base_id
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
),
insertquery as (
insert into alias_lookup (hash, ctx, base_id, flags)
select %s, %s, %s, %s
where not exists (select 1 from selectquery)
)
select base_id
from selectquery
""", (h, 2, h, 2, 123, 0)),
ROWCOUNT,
FETCH_ONE,
TPC_ROLLBACK])
def test_set_race_condition_fallback(self):
@query_fail
def qf():
query_fail(None)
return psycopg2.IntegrityError()
add_fetch_result([(123, 0)])
self.assertEqual(
datahog.alias.set(self.p, 123, 2, 'value'),
False)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
TPC_BEGIN,
GET_CURSOR,
EXECUTE_FAILURE("""
with selectquery (base_id) as (
select base_id
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
),
insertquery as (
insert into alias_lookup (hash, ctx, base_id, flags)
select %s, %s, %s, %s
where not exists (select 1 from selectquery)
)
select base_id
from selectquery
""", (h, 2, h, 2, 123, 0)),
TPC_ROLLBACK,
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
ROLLBACK])
def test_lookup(self):
add_fetch_result([(123, 0)])
self.assertEqual(
datahog.alias.lookup(self.p, 'value', 2),
{'base_id': 123, 'ctx': 2, 'value': 'value', 'flags': set([])})
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT])
def test_lookup_failure(self):
add_fetch_result([])
self.assertEqual(
datahog.alias.lookup(self.p, 'value', 2),
None)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
COMMIT])
def test_list(self):
add_fetch_result([(0, 'val1', 0), (0, 'val2', 1), (0, 'val3', 2)])
self.assertEqual(
datahog.alias.list(self.p, 123, 2),
([
{'base_id': 123, 'ctx': 2, 'value': 'val1',
'flags': set([])},
{'base_id': 123, 'ctx': 2, 'value': 'val2',
'flags': set([])},
{'base_id': 123, 'ctx': 2, 'value': 'val3',
'flags': set([])},
], 3))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select flags, value, pos
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
and pos >= %s
order by pos asc
limit %s
""", (123, 2, 0, 100)),
FETCH_ALL,
COMMIT])
def test_list_empty(self):
add_fetch_result([])
self.assertEqual(
datahog.alias.list(self.p, 123, 2),
([], 0))
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select flags, value, pos
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
and pos >= %s
order by pos asc
limit %s
""", (123, 2, 0, 100)),
FETCH_ALL,
COMMIT])
def test_batch(self):
add_fetch_result([
(123, 0, 2, 'val1'),
(124, 0, 2, 'val2'),
(126, 0, 2, 'val3')])
self.assertEqual(
datahog.alias.batch(self.p,
[(123, 2), (124, 2), (125, 2), (126, 2)]),
[
{'base_id': 123, 'flags': set([]), 'ctx': 2,
'value': 'val1'},
{'base_id': 124, 'flags': set([]), 'ctx': 2,
'value': 'val2'},
None,
{'base_id': 126, 'flags': set([]), 'ctx': 2,
'value': 'val3'}])
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
with window_query as (
select base_id, flags, ctx, value, rank() over (
partition by base_id, ctx
order by pos
) as r
from alias
where
time_removed is null
and (base_id, ctx) in ((%s, %s),(%s, %s),(%s, %s),(%s, %s))
)
select base_id, flags, ctx, value
from window_query
where r=1
""", (123, 2, 124, 2, 125, 2, 126, 2)),
FETCH_ALL,
COMMIT])
def test_add_flags(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [1, 3], []),
set([1, 3]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=flags | %s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (5, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=flags | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (5, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
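    # Flag-to-bitmask arithmetic behind these assertions: flag n occupies bit n - 1,
    # so add=[1, 3] sends mask 0b101 = 5, clear=[2, 3] sends mask 0b110 = 6, and the
    # combined set_flags(add=[1, 3], clear=[2]) further down sends (2, 5) into the
    # "(flags & ~%s) | %s" update.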
def test_add_flags_no_alias(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [1, 3], []),
None)
def test_clear_flags(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(1,)])
add_fetch_result([(1,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [], [2, 3]),
set([1]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=flags & ~%s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (6, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=flags & ~%s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (6, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_clear_flags_no_alias(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [], [1, 3]),
None)
def test_set_flags_add(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [1, 3], []),
set([1, 3]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=flags | %s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (5, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=flags | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (5, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_clear(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(4,)])
add_fetch_result([(4,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [], [1, 2]),
set([3]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=flags & ~%s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (3, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=flags & ~%s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (3, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_both(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([(5,)])
add_fetch_result([(5,)])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [1, 3], [2]),
set([1, 3]))
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set flags=(flags & ~%s) | %s
where time_removed is null and ctx=%s and hash=%s
returning flags
""", (2, 5, 2, h)),
FETCH_ALL,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
update alias
set flags=(flags & ~%s) | %s
where time_removed is null and ctx=%s and value=%s and base_id=%s
returning flags
""", (2, 5, 2, 'value', 123)),
FETCH_ALL,
COMMIT,
TPC_COMMIT])
def test_set_flags_no_alias(self):
datahog.set_flag(1, 2)
datahog.set_flag(2, 2)
datahog.set_flag(3, 2)
add_fetch_result([(123, 5)])
add_fetch_result([])
self.assertEqual(
datahog.alias.set_flags(self.p, 123, 2, 'value', [], [1, 2]),
None)
def test_shift(self):
add_fetch_result([(True,)])
self.assertEqual(
datahog.alias.shift(self.p, 123, 2, 'value', 3),
True)
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
with oldpos as (
select pos
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
), bump as (
update alias
set pos=pos + (case
when (select pos from oldpos) < pos
then -1
else 1
end)
where
exists (select 1 from oldpos)
and time_removed is null
and base_id=%s
and ctx=%s
and pos between symmetric (select pos from oldpos) and %s
), maxpos(n) as (
select pos
from alias
where
time_removed is null
and base_id=%s
and ctx=%s
order by pos desc
limit 1
), move as (
update alias
set pos=(case
when %s > (select n from maxpos)
then (select n from maxpos)
else %s
end)
where
exists (select 1 from oldpos)
and time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning 1
)
select exists (select 1 from move)
""", (123, 2, 'value', 123, 2, 3, 123, 2, 3, 3, 123, 2, 'value')),
FETCH_ONE,
COMMIT])
def test_remove(self):
add_fetch_result([(123, 0)])
add_fetch_result([()])
add_fetch_result([()])
self.assertEqual(
datahog.alias.remove(self.p, 123, 2, 'value'),
True)
h = hmac.new(self.p.digestkey, 'value', hashlib.sha1).digest()
self.assertEqual(eventlog, [
GET_CURSOR,
EXECUTE("""
select base_id, flags
from alias_lookup
where
time_removed is null
and hash=%s
and ctx=%s
""", (h, 2)),
ROWCOUNT,
FETCH_ONE,
COMMIT,
TPC_BEGIN,
GET_CURSOR,
EXECUTE("""
update alias_lookup
set time_removed=now()
where
time_removed is null
and hash=%s
and ctx=%s
and base_id=%s
""", (h, 2, 123)),
ROWCOUNT,
TPC_PREPARE,
RESET,
GET_CURSOR,
EXECUTE("""
with removal as (
update alias
set time_removed=now()
where
time_removed is null
and base_id=%s
and ctx=%s
and value=%s
returning pos
), bump as (
update alias
set pos = pos - 1
where
exists (select 1 from removal)
and time_removed is null
and base_id=%s
and ctx=%s
and pos > (select pos from removal)
)
select 1 from removal
""", (123, 2, 'value', 123, 2)),
ROWCOUNT,
COMMIT,
TPC_COMMIT])
if __name__ == '__main__':
unittest.main()
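
# --- Hedged sketch, not part of the source file ---
# pgmock is star-imported above and only visible through its usage here:
# add_fetch_result() queues canned rows, eventlog collects one token per cursor
# operation, and EXECUTE(sql, params) builds a comparable event. A minimal fake
# honoring that contract might look like the commented sketch below; the real
# pgmock may differ substantially.
#
#   _pending_results = []
#   eventlog = []
#
#   def add_fetch_result(rows):
#       _pending_results.append(rows)  # consumed FIFO by the fake cursor's fetches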
| 24.175603 | 79 | 0.522318 | 2,274 | 18,035 | 3.983729 | 0.067722 | 0.034441 | 0.05409 | 0.065681 | 0.845237 | 0.814218 | 0.793024 | 0.76068 | 0.744232 | 0.721713 | 0 | 0.039744 | 0.351261 | 18,035 | 745 | 80 | 24.208054 | 0.73453 | 0.002273 | 0 | 0.797277 | 0 | 0.001513 | 0.352601 | 0 | 0 | 0 | 0 | 0 | 0.05295 | 1 | 0.03177 | false | 0 | 0.015129 | 0 | 0.049924 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
61ff0558dbea638cf790cffe0085f678ba9c2504 | 12,312 | py | Python | scripts/populate_active_meta_data.py | Cooops/ABUpower | ec84f8f0177b09a09195a974357d328376df7efc | ["MIT"] | 2 | 2018-10-09T01:17:21.000Z | 2019-05-17T11:01:46.000Z | scripts/populate_active_meta_data.py | Cooops/ABUpower | ec84f8f0177b09a09195a974357d328376df7efc | ["MIT"] | null | null | null | scripts/populate_active_meta_data.py | Cooops/ABUpower | ec84f8f0177b09a09195a974357d328376df7efc | ["MIT"] | null | null | null |
import pandas as pd
from db_queries import fetch_data, get_trace_and_log, prune_active
from gen_utils import database_connection, get_search_words, POWER_CONFIG, DUALS_CONFIG
def generate_stat_history(setCheck, boolCheck):
dataArray = []
if setCheck == 'Alpha' and boolCheck == 'Power':
query = (
f"""
SELECT active_product_nick, avg(active_product_prices), min(active_product_prices), max(active_product_prices), count(active_product_prices), CAST(sum(current_timestamp::date - active_product_start::date) as double precision)/count(active_product_end) as average_length, sum(active_product_prices)
FROM active_products
WHERE active_product_nick IN ('{setCheck} Black Lotus', '{setCheck} Mox Sapphire', '{setCheck} Mox Jet', '{setCheck} Mox Pearl', '{setCheck} Mox Ruby', '{setCheck} Mox Emerald', '{setCheck} Timetwister', '{setCheck} Ancestral Recall', '{setCheck} Time Walk')
GROUP BY active_product_nick;
"""
)
data = fetch_data(query)
dataArray.append(data.values)
return dataArray
elif setCheck != 'Alpha' and boolCheck == 'Power':
query = (
f"""
SELECT active_product_nick, avg(active_product_prices), min(active_product_prices), max(active_product_prices), count(active_product_prices), CAST(sum(current_timestamp::date - active_product_start::date) as double precision)/count(active_product_end) as average_length, sum(active_product_prices)
FROM active_products
WHERE active_product_nick IN ('{setCheck} Black Lotus MTG', '{setCheck} Mox Sapphire', '{setCheck} Mox Jet', '{setCheck} Mox Pearl', '{setCheck} Mox Ruby', '{setCheck} Mox Emerald', '{setCheck} Timetwister', '{setCheck} Ancestral Recall', '{setCheck} Time Walk')
GROUP BY active_product_nick;
"""
)
data = fetch_data(query)
dataArray.append(data.values)
return dataArray
elif boolCheck == 'Duals':
query = (
f"""
SELECT active_product_nick, avg(active_product_prices), min(active_product_prices), max(active_product_prices), count(active_product_prices), CAST(sum(current_timestamp::date - active_product_start::date) as double precision)/count(active_product_end) as average_length, sum(active_product_prices)
FROM active_products
WHERE active_product_nick IN ('{setCheck} Tundra MTG', '{setCheck} Underground Sea MTG', '{setCheck} Badlands MTG', '{setCheck} Taiga MTG', '{setCheck} Savannah MTG', '{setCheck} Scrubland MTG', '{setCheck} Volcanic Island MTG', '{setCheck} Bayou MTG', '{setCheck} Plateau MTG', '{setCheck} Tropical Island MTG')
GROUP BY active_product_nick;
"""
)
data = fetch_data(query)
dataArray.append(data.values)
return dataArray
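
# Hedged refactor sketch (not from the source): the three branches above differ only
# in the card list spliced into the IN (...) clause. A helper like the one below
# would collapse them; the names here are hypothetical.
POWER_CARDS = ['Black Lotus', 'Mox Sapphire', 'Mox Jet', 'Mox Pearl', 'Mox Ruby',
               'Mox Emerald', 'Timetwister', 'Ancestral Recall', 'Time Walk']

def power_nick_list(setCheck):
    cards = list(POWER_CARDS)
    if setCheck != 'Alpha':
        cards[0] = 'Black Lotus MTG'  # non-Alpha listings carry the MTG suffix
    # quoted values ready for "WHERE active_product_nick IN (...)"
    return ", ".join(f"'{setCheck} {card}'" for card in cards)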
def generate_index_history(setCheck, setId, boolCheck):
dataArray = []
if setCheck == 'Alpha' and boolCheck == 'Power':
query = (
f"""
SELECT '{setCheck}', '{setId}', sum(stats.avger), sum(stats.miner), sum(stats.maxer), avg(stats.lengther), sum(stats.counter) ,sum(stats.sumer)
FROM (SELECT active_product_nick, avg(active_product_prices) as avger, min(active_product_prices) as miner, max(active_product_prices) as maxer, count(active_product_prices) as counter, CAST(sum(current_timestamp::date - active_product_start::date) as double precision)/count(active_product_end) as lengther, sum(active_product_prices) as sumer
FROM active_products
WHERE active_product_nick IN ('{setCheck} Black Lotus', '{setCheck} Mox Sapphire', '{setCheck} Mox Jet', '{setCheck} Mox Pearl', '{setCheck} Mox Ruby', '{setCheck} Mox Emerald', '{setCheck} Timetwister', '{setCheck} Ancestral Recall', '{setCheck} Time Walk')
GROUP BY active_product_nick) stats;
"""
)
data = fetch_data(query)
dataArray.append(data.values)
return dataArray
elif setCheck != 'Alpha' and boolCheck == 'Power':
query = (
f"""
SELECT '{setCheck}', '{setId}', sum(stats.avger), sum(stats.miner), sum(stats.maxer), avg(stats.lengther), sum(stats.counter) ,sum(stats.sumer)
FROM (SELECT active_product_nick, avg(active_product_prices) as avger, min(active_product_prices) as miner, max(active_product_prices) as maxer, count(active_product_prices) as counter, CAST(sum(current_timestamp::date - active_product_start::date) as double precision)/count(active_product_end) as lengther, sum(active_product_prices) as sumer
FROM active_products
WHERE active_product_nick IN ('{setCheck} Black Lotus MTG', '{setCheck} Mox Sapphire', '{setCheck} Mox Jet', '{setCheck} Mox Pearl', '{setCheck} Mox Ruby', '{setCheck} Mox Emerald', '{setCheck} Timetwister', '{setCheck} Ancestral Recall', '{setCheck} Time Walk')
GROUP BY active_product_nick) stats;
"""
)
data = fetch_data(query)
dataArray.append(data.values)
return dataArray
elif boolCheck == 'Duals':
query = (
f"""
SELECT '{setCheck}', '{setId}', sum(stats.avger), sum(stats.miner), sum(stats.maxer), avg(stats.lengther), sum(stats.counter) ,sum(stats.sumer)
FROM (SELECT active_product_nick, avg(active_product_prices) as avger, min(active_product_prices) as miner, max(active_product_prices) as maxer, count(active_product_prices) as counter, CAST(sum(current_timestamp::date - active_product_start::date) as double precision)/count(active_product_end) as lengther, sum(active_product_prices) as sumer
FROM active_products
WHERE active_product_nick IN ('{setCheck} Tundra MTG', '{setCheck} Underground Sea MTG', '{setCheck} Badlands MTG', '{setCheck} Taiga MTG', '{setCheck} Savannah MTG', '{setCheck} Scrubland MTG', '{setCheck} Volcanic Island MTG', '{setCheck} Bayou MTG', '{setCheck} Plateau MTG', '{setCheck} Tropical Island MTG')
GROUP BY active_product_nick) stats;
"""
)
data = fetch_data(query)
dataArray.append(data.values)
return dataArray
def insert_stats(cursor, mtgArray):
for neach in mtgArray:
for each in neach:
try:
cursor.execute("""INSERT INTO production_active_products_stats(active_product_nick, active_product_avg, active_product_min, active_product_max, active_product_depth, active_product_avg_length, active_product_sum)
VALUES (%s, %s, %s, %s, %s, %s, %s)""", (each[0], each[1], each[2], each[3], each[4], each[5], each[6]))
except Exception as e:
get_trace_and_log(e)
def insert_index(cursor, mtgArray):
for neach in mtgArray:
for each in neach:
try:
cursor.execute("""INSERT INTO production_active_products_index(active_product_set_name, active_product_set_id, active_product_index_avg, active_product_index_min, active_product_index_max, active_product_index_length_avg, active_product_index_count_sum, active_product_index_sum)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)""", (each[0], each[1], each[2], each[3], each[4], each[5], each[6], each[7]))
except Exception as e:
get_trace_and_log(e)
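
# Hedged hardening sketch (not from the source): generate_* build their SQL with
# f-strings, which is workable while set names come only from POWER_CONFIG and
# DUALS_CONFIG but unsafe for external input. A parameterized IN-list keeps the same
# query shape; this helper is illustrative, not the project's code.
def in_clause(values):
    # in_clause(['a', 'b']) -> ('%s,%s', ['a', 'b']) for cursor.execute
    return ",".join(["%s"] * len(values)), list(values)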
def pipe_duals_stats():
# generate `cursor` (used to execute db queries)
cursor = database_connection()
# iterate over `DUALS_CONFIG` and pipe each nested array.
for each in DUALS_CONFIG:
print(f"Pulling {DUALS_CONFIG[each]} from {each}")
dualsArray = generate_stat_history(setCheck=each, boolCheck=DUALS_CONFIG[each])
if len(dualsArray) > 0:
print(f"Piping nested arrays")
insert_stats(cursor=cursor, mtgArray=dualsArray)
def pipe_power_stats():
cursor = database_connection()
for each in POWER_CONFIG:
print(f"Pulling {POWER_CONFIG[each]} from {each}")
powerArray = generate_stat_history(setCheck=each, boolCheck=POWER_CONFIG[each])
if len(powerArray) > 0:
print(f"Piping nested arrays")
insert_stats(cursor=cursor, mtgArray=powerArray)
def pipe_duals_index():
cursor = database_connection()
for each in DUALS_CONFIG:
if each == 'Alpha':
print(f"Forming {DUALS_CONFIG[each]} index from {each} stats")
dualsArray = generate_index_history(setCheck=each, setId=4, boolCheck=DUALS_CONFIG[each])
if len(dualsArray) > 0:
print(f"Piping nested arrays")
insert_index(cursor=cursor, mtgArray=dualsArray)
elif each == 'Beta':
print(f"Forming {DUALS_CONFIG[each]} index from {each} stats")
dualsArray = generate_index_history(setCheck=each, setId=5, boolCheck=DUALS_CONFIG[each])
if len(dualsArray) > 0:
print(f"Piping nested arrays")
insert_index(cursor=cursor, mtgArray=dualsArray)
elif each == 'Unlimited':
print(f"Forming {DUALS_CONFIG[each]} index from {each} stats")
dualsArray = generate_index_history(setCheck=each, setId=6, boolCheck=DUALS_CONFIG[each])
if len(dualsArray) > 0:
print(f"Piping nested arrays")
insert_index(cursor=cursor, mtgArray=dualsArray)
elif each == 'Revised':
print(f"Forming {DUALS_CONFIG[each]} index from {each} stats")
dualsArray = generate_index_history(setCheck=each, setId=7, boolCheck=DUALS_CONFIG[each])
if len(dualsArray) > 0:
print(f"Piping nested arrays")
insert_index(cursor=cursor, mtgArray=dualsArray)
def pipe_power_index():
cursor = database_connection()
for each in POWER_CONFIG:
if each == 'Alpha':
print(f"Pulling {POWER_CONFIG[each]} from {each} stats")
powerArray = generate_index_history(setCheck=each, setId=1, boolCheck=POWER_CONFIG[each])
if len(powerArray) > 0:
print(f"Piping nested arrays")
insert_index(cursor=cursor, mtgArray=powerArray)
elif each == 'Beta':
print(f"Pulling {POWER_CONFIG[each]} from {each} stats")
powerArray = generate_index_history(setCheck=each, setId=2, boolCheck=POWER_CONFIG[each])
if len(powerArray) > 0:
print(f"Piping nested arrays")
insert_index(cursor=cursor, mtgArray=powerArray)
elif each == 'Unlimited':
print(f"Pulling {POWER_CONFIG[each]} from {each} stats")
powerArray = generate_index_history(setCheck=each, setId=3, boolCheck=POWER_CONFIG[each])
if len(powerArray) > 0:
print(f"Piping nested arrays")
insert_index(cursor=cursor, mtgArray=powerArray)
def prune_db(cursor):
"""(cursor) -> ()
Prunes active_products before making any further calculations (averages, etc.)"""
words = get_search_words()
# words = ['Revised Tundra MTG']
for value in words:
print(f'Pruning {value}....')
prune_active(value, cursor)
print('-------------------------------------')
    print('Successfully pruned active_products')
print('-------------------------------------')
if __name__ == '__main__':
inputCheck = input('Beginning once-a-day batch calc script -- are you sure you want to proceed?: ')
if inputCheck in ('Y', 'y'):
        print('I understand. Beginning once-a-day batch script.')
prune_db(cursor=database_connection())
print()
# begin piping stats
pipe_power_stats()
print()
pipe_duals_stats()
print()
# begin piping index
pipe_power_index()
print()
pipe_duals_index()
print()
print('Batch process active. Data has been successfully inserted.')
elif inputCheck in ('N', 'n'):
print('Exiting batch process.')
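
# Hedged usage note (an observation, not from the source): the prompt above accepts
# only Y/y to run and N/n to exit; any other answer falls through both branches and
# the script ends silently, so a non-interactive scheduler would need to pipe in a
# "Y" (e.g. `echo Y | python populate_active_meta_data.py`).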
| 59.76699 | 361 | 0.637102 | 1,443 | 12,312 | 5.229383 | 0.125433 | 0.129208 | 0.075537 | 0.041744 | 0.82229 | 0.813411 | 0.801484 | 0.79128 | 0.774848 | 0.77233 | 0 | 0.003342 | 0.246589 | 12,312 | 205 | 362 | 60.058537 | 0.810155 | 0.021686 | 0 | 0.716578 | 1 | 0.090909 | 0.547692 | 0.151361 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048128 | false | 0 | 0.016043 | 0 | 0.096257 | 0.160428 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
1103fd8a91d4ec4dd6a0a1fd098e01a19f737c78 | 159 | py | Python | HiveMind_presence/__init__.py | JarbasHiveMind/HiveMind-presence | ca0a8f4ec179251b7b62e881bad93f3ec7497589 | ["Apache-2.0"] | null | null | null | HiveMind_presence/__init__.py | JarbasHiveMind/HiveMind-presence | ca0a8f4ec179251b7b62e881bad93f3ec7497589 | ["Apache-2.0"] | null | null | null | HiveMind_presence/__init__.py | JarbasHiveMind/HiveMind-presence | ca0a8f4ec179251b7b62e881bad93f3ec7497589 | ["Apache-2.0"] | null | null | null |
from HiveMind_presence.devices import HiveMindNode
from HiveMind_presence.discovery import LocalDiscovery
from HiveMind_presence.presence import LocalPresence
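
# Hedged note: this __init__ simply re-exports the package's public surface, so
# callers can write
#   from HiveMind_presence import HiveMindNode, LocalDiscovery, LocalPresence
# instead of importing from the submodules. Anything beyond these three names is an
# assumption about the package.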
| 39.75 | 54 | 0.90566 | 18 | 159 | 7.833333 | 0.5 | 0.255319 | 0.425532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075472 | 159 | 3 | 55 | 53 | 0.959184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
3a219a3180a2dbffa003705847b56c8f6c429227 | 25,683 | py | Python | hanibal/fiscaloriginal/tax.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | ["Unlicense"] | null | null | null | hanibal/fiscaloriginal/tax.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | ["Unlicense"] | null | null | null | hanibal/fiscaloriginal/tax.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import api
class account_tax(osv.osv):
_name = 'account.tax'
_inherit = 'account.tax'
def _unit_compute_ret(self, cr, uid, taxes, price_unit, product=None, partner=None, quantity=0):
taxes = self._applicable(cr, uid, taxes, price_unit, product, partner)
res = []
cur_price_unit=price_unit
for tax in taxes:
# we compute the amount for the current tax object and append it to the result
data = {'id':tax.id,
'name':tax.description and tax.description + " - " + tax.name or tax.name,
'account_collected_id':tax.account_collected_id.id,
'account_paid_id':tax.account_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
'codigo': tax.codigofiscal,
'porcentaje': tax.amount,
'tipo': tax.tipo,
}
res.append(data)
if tax.type=='percent':
amount = cur_price_unit * tax.amount
data['amount'] = amount
elif tax.type=='fixed':
data['amount'] = tax.amount
data['tax_amount']=quantity
# data['amount'] = quantity
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner}
exec tax.python_compute in localdict
amount = localdict['result']
data['amount'] = amount
elif tax.type=='balance':
data['amount'] = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
data['balance'] = cur_price_unit
amount2 = data.get('amount', 0.0)
if tax.child_ids:
if tax.child_depend:
latest = res.pop()
amount = amount2
child_tax = self._unit_compute(cr, uid, tax.child_ids, amount, product, partner, quantity)
res.extend(child_tax)
if tax.child_depend:
for r in res:
for name in ('base','ref_base'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['price_unit'] = latest['price_unit']
latest[name+'_code_id'] = False
for name in ('tax','ref_tax'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['amount'] = data['amount']
latest[name+'_code_id'] = False
if tax.include_base_amount:
cur_price_unit+=amount2
return res
def _unit_compute_inv_ret(self, cr, uid, taxes, price_unit, product=None, partner=None):
taxes = self._applicable(cr, uid, taxes, price_unit, product, partner)
res = []
taxes.reverse()
cur_price_unit = price_unit
tax_parent_tot = 0.0
for tax in taxes:
if (tax.type=='percent') and not tax.include_base_amount:
tax_parent_tot += tax.amount
for tax in taxes:
if (tax.type=='fixed') and not tax.include_base_amount:
cur_price_unit -= tax.amount
for tax in taxes:
if tax.type=='percent':
if tax.include_base_amount:
amount = cur_price_unit - (cur_price_unit / (1 + tax.amount))
else:
amount = (cur_price_unit / (1 + tax_parent_tot)) * tax.amount
elif tax.type=='fixed':
amount = tax.amount
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner}
exec tax.python_compute_inv in localdict
amount = localdict['result']
elif tax.type=='balance':
amount = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
if tax.include_base_amount:
cur_price_unit -= amount
todo = 0
else:
todo = 1
res.append({
'id': tax.id,
'todo': todo,
'name': tax.name,
'amount': amount,
'account_collected_id': tax.account_collected_id.id,
'account_paid_id': tax.account_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
'porcentaje': tax.amount,
'codigo': tax.description,
'tipo':tax.tipo,
})
if tax.child_ids:
if tax.child_depend:
del res[-1]
amount = price_unit
parent_tax = self._unit_compute_inv(cr, uid, tax.child_ids, amount, product, partner)
res.extend(parent_tax)
total = 0.0
for r in res:
if r['todo']:
total += r['amount']
for r in res:
r['price_unit'] -= total
r['todo'] = 0
return res
def compute_inv_ret(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None):
"""
Compute tax values for given PRICE_UNIT, QUANTITY and a buyer/seller ADDRESS_ID.
Price Unit is a VAT included price
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
res = self._unit_compute_inv_ret(cr, uid, taxes, price_unit, product, partner=None)
total = 0.0
obj_precision = self.pool.get('decimal.precision')
for r in res:
prec = obj_precision.precision_get(cr, uid, 'Account')
if r.get('balance',False):
r['amount'] = round(r['balance'] * quantity, prec) - total
else:
r['amount'] = round(r['amount'] * quantity, prec)
total += r['amount']
return res
def _compute_ret(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None):
"""
Compute tax values for given PRICE_UNIT, QUANTITY and a buyer/seller ADDRESS_ID.
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
res = self._unit_compute_ret(cr, uid, taxes, price_unit, product, partner, quantity)
total = 0.0
precision_pool = self.pool.get('decimal.precision')
for r in res:
if r.get('balance',False):
r['amount'] = round(r.get('balance', 0.0) * quantity, precision_pool.precision_get(cr, uid, 'Account')) - total
else:
r['amount'] = round(r.get('amount', 0.0) * quantity, precision_pool.precision_get(cr, uid, 'Account'))
total += r['amount']
return res
#----------------------------------------------------------------------------------
@api.v7
def compute_all(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, force_excluded=False):
"""
:param force_excluded: boolean used to say that we don't want to consider the value of field price_include of
tax. It's used in encoding by line where you don't matter if you encoded a tax with that boolean to True or
False
RETURN: {
'total': 0.0, # Total without taxes
'total_included: 0.0, # Total with taxes
'taxes': [] # List of taxes, see compute for the format
}
"""
# By default, for each tax, tax amount will first be computed
# and rounded at the 'Account' decimal precision for each
# PO/SO/invoice line and then these rounded amounts will be
# summed, leading to the total amount for that tax. But, if the
# company has tax_calculation_rounding_method = round_globally,
# we still follow the same method, but we use a much larger
# precision when we round the tax amount for each line (we use
# the 'Account' decimal precision + 5), and that way it's like
# rounding after the sum of the tax amounts of each line
"""precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
totalin = totalex = round(price_unit * quantity, precision)"""
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
tax_compute_precision = precision
if taxes and taxes[0].company_id.tax_calculation_rounding_method == 'round_globally':
tax_compute_precision += 5
totalin = totalex = round(price_unit * quantity, precision)
tin = []
tex = []
for tax in taxes:
if not tax.price_include or force_excluded:
tex.append(tax)
else:
tin.append(tax)
tin = self.compute_inv_ret(cr, uid, tin, price_unit, quantity, product=product, partner=partner)
for r in tin:
totalex -= r.get('amount', 0.0)
totlex_qty = 0.0
try:
totlex_qty = totalex/quantity
except:
pass
tex = self._compute_ret(cr, uid, tex, totlex_qty, quantity, product=product, partner=partner)
for r in tex:
totalin += r.get('amount', 0.0)
return {
'total': totalex,
'total_included': totalin,
'taxes': tin + tex
}
@api.v8
def compute_all(self, price_unit, quantity, product=None, partner=None, force_excluded=False):
return self._model.compute_all(
self._cr, self._uid, self, price_unit, quantity,
product=product, partner=partner, force_excluded=force_excluded)
#----------------------------------------------------------------------------------
_columns = {
'codigofiscal': fields.char('Concepto de Retencion',10),
'descripcion': fields.char('Descripcion',100),
'tipo': fields.selection([('iva','IVA'),('fte','FUENTE')],'Tipo'),
'base': fields.selection([('iva','IVA'),('subtotal','Subtotal'),('basecero','Base 0'),('baseiva','Base 12'),('basenograva','Base no grava IVA')],'Base imponible'),
'reglaretencion_id':fields.one2many('fiscal.reglaretencion', 'tax_id', 'Reglas para impuestos'),
        'esretencion' : fields.boolean('Es Retencion', required=True),
}
_defaults = {
'esretencion' : False,
}
def check_reglasretencion(self, cr, uid, ids, context=None):
for tax in self.browse(cr, uid, ids, context=context):
if not tax.esretencion:
continue
if not tax.reglaretencion_id:
return False
return True
_constraints = [
(check_reglasretencion,'Ingrese por lo menos una regla para retencion.', ['reglaretencion_id']),
]
account_tax()
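
# Hedged usage sketch (the pool/browse plumbing is an assumption; only compute_all's
# signature above is from the source):
#   tax_obj = self.pool.get('account.tax')
#   res = tax_obj.compute_all(cr, uid, tax_records, price_unit=100.0, quantity=2)
#   res['total'], res['total_included'], res['taxes']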
#----------------------------------------------------------------------------------
# keysightSD1/keysightSD1.py (repo: opietx/core_tools,
# rev d5bd2d4beed74791b80ff5bdabd67774403763ef, license: BSD-2-Clause)
#----------------------------------------------------------------------------------
import os;
import sys
from ctypes import *
from math import pow, log, ceil
from abc import ABCMeta, abstractmethod
import keysightSD1.SD1constants as constants
import numpy as np
if sys.version_info[1] > 7 and sys.platform not in ['linux', 'darwin']:
os.add_dll_directory('C:\\Program Files\\Keysight\\SD1\\shared')
os.add_dll_directory('C:\\Program Files\\Common Files\\Keysight\\PathWave Test Sync Executive\\Core\\bin')
def to_numpy_float(data):
    # np.float was removed in NumPy 1.24; use the concrete float64 dtype.
    if type(data) != np.ndarray:
        return np.array(data, np.float64)
    if data.dtype != np.float64:
        return data.astype(np.float64)
    return data
def to_numpy_int16(data):
if type(data) != np.ndarray:
return np.array(data, np.int16)
if data.dtype != np.int16:
return data.astype(np.int16)
return data
def to_numpy_int32(data):
if type(data) != np.ndarray:
return np.array(data, np.int32)
if data.dtype != np.int32:
return data.astype(np.int32)
return data
def to_numpy_uint32(data):
if type(data) != np.ndarray:
return np.array(data, np.uint32)
if data.dtype != np.uint32:
return data.astype(np.uint32)
return data
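# Minimal sketch of what these converters guarantee: plain Python sequences and
# differently-typed arrays both come out as NumPy arrays of the target dtype,
# ready to be handed to ctypes in the wrappers below.
#
#   assert to_numpy_int16([1, 2, 3]).dtype == np.int16
#   assert to_numpy_float(np.arange(4, dtype=np.int32)).dtype == np.float64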
class SD_Object :
if sys.platform not in ['linux', 'darwin']:
__core_dll = cdll.LoadLibrary("SD1core" if os.name == 'nt' else "libSD1core.so")
def __init__(self) :
self.__handle = 0;
@classmethod
def __formatString(cls, string) :
tmp = string.decode();
return tmp[0:tmp.find('\0')];
class SD_Error(SD_Object) :
STATUS_OK = 0;
NONE = 0;
STATUS_DEMO = 1;
OPENING_MODULE = -8000;
CLOSING_MODULE = -8001;
OPENING_HVI = -8002;
CLOSING_HVI = -8003;
MODULE_NOT_OPENED = -8004;
MODULE_NOT_OPENED_BY_USER = -8005;
MODULE_ALREADY_OPENED = -8006;
HVI_NOT_OPENED = -8007;
INVALID_OBJECTID = -8008;
INVALID_MODULEID = -8009;
INVALID_MODULEUSERNAME = -8010;
INVALID_HVIID = -8011;
INVALID_OBJECT = -8012;
INVALID_NCHANNEL = -8013;
BUS_DOES_NOT_EXIST = -8014;
BITMAP_ASSIGNED_DOES_NOT_EXIST = -8015;
BUS_INVALID_SIZE = -8016;
BUS_INVALID_DATA = -8017;
INVALID_VALUE = -8018;
CREATING_WAVE = -8019;
NOT_VALID_PARAMETERS = -8020;
AWG_FAILED = -8021;
DAQ_INVALID_FUNCTIONALITY = -8022;
DAQ_POOL_ALREADY_RUNNING = -8023;
UNKNOWN = -8024;
INVALID_PARAMETERS = -8025;
MODULE_NOT_FOUND = -8026;
DRIVER_RESOURCE_BUSY = -8027;
DRIVER_RESOURCE_NOT_READY = -8028;
DRIVER_ALLOCATE_BUFFER = -8029;
ALLOCATE_BUFFER = -8030;
RESOURCE_NOT_READY = -8031;
HARDWARE = -8032;
INVALID_OPERATION = -8033;
NO_COMPILED_CODE = -8034;
FW_VERIFICATION = -8035;
COMPATIBILITY = -8036;
INVALID_TYPE = -8037;
DEMO_MODULE = -8038;
INVALID_BUFFER = -8039;
INVALID_INDEX = -8040;
INVALID_NHISTOGRAM = -8041;
INVALID_NBINS = -8042;
INVALID_MASK = -8043;
INVALID_WAVEFORM = -8044;
INVALID_STROBE = -8045;
INVALID_STROBE_VALUE = -8046;
INVALID_DEBOUNCING = -8047;
INVALID_PRESCALER = -8048;
INVALID_PORT = -8049;
INVALID_DIRECTION = -8050;
INVALID_MODE = -8051;
INVALID_FREQUENCY = -8052;
INVALID_IMPEDANCE = -8053;
INVALID_GAIN = -8054;
INVALID_FULLSCALE = -8055;
INVALID_FILE = -8056;
INVALID_SLOT = -8057;
INVALID_NAME = -8058;
INVALID_SERIAL = -8059;
INVALID_START = -8060;
INVALID_END = -8061;
INVALID_CYCLES = -8062;
HVI_INVALID_NUMBER_MODULES = -8063;
DAQ_P2P_ALREADY_RUNNING = -8064;
OPEN_DRAIN_NOT_SUPPORTED = -8065;
CHASSIS_PORTS_NOT_SUPPORTED = -8066;
CHASSIS_SETUP_NOT_SUPPORTED = -8067;
OPEN_DRAIN_FAILED = -8068;
CHASSIS_SETUP_FAILED = -8069;
INVALID_PART = -8070;
INVALID_SIZE = -8071;
INVALID_HANDLE = -8072;
NO_WAVEFORMS_IN_LIST = -8073
PATHWAVE_REGISTER_NOT_FOUND = -8074
SD_ERROR_HVI_DRIVER_ERROR = -8075
BAD_MODULE_OPEN_OPTION = -8076
FW_UPGRADE_REQUIRED = -8077
NO_FP_OPTION = -8078
FILE_DOES_NOT_EXIST = -8079
SW_UPGRADE_REQUIRED = -8080
    INVALID_SANDBOX_INTERFACE = -8081
    MODULE_NOT_SUPPORTED = -8082
SD_WARNING_DAQ_POINTS_ODD_NUM = -9000
@classmethod
def getErrorMessage(cls, errorNumber) :
cls._SD_Object__core_dll.SD_GetErrorMessage.restype = c_char_p;
return cls._SD_Object__core_dll.SD_GetErrorMessage(errorNumber).decode();
class SD_Object_Type :
HVI = 1;
AOU = 2;
TDC = 3;
DIO = 4;
WAVE = 5;
AIN = 6;
AIO = 7;
class SD_Waveshapes :
AOU_HIZ = -1;
AOU_OFF = 0;
AOU_SINUSOIDAL = 1;
AOU_TRIANGULAR = 2;
AOU_SQUARE = 4;
AOU_DC = 5;
AOU_AWG = 6;
AOU_PARTNER = 8;
class SD_DigitalFilterModes :
AOU_FILTER_OFF = 0;
AOU_FILTER_FLATNESS = 1;
AOU_FILTER_FIFTEEN_TAP = 3;
class SD_WaveformTypes :
WAVE_ANALOG = 0;
WAVE_IQ = 2;
WAVE_IQPOLAR = 3;
WAVE_DIGITAL = 5;
WAVE_ANALOG_DUAL = 7;
class SD_ModulationTypes :
AOU_MOD_OFF = 0;
AOU_MOD_FM = 1;
AOU_MOD_PHASE = 2;
AOU_MOD_AM = 1;
AOU_MOD_OFFSET = 2;
class SD_TriggerDirections :
AOU_TRG_OUT = 0;
AOU_TRG_IN = 1;
class SD_TriggerBehaviors :
TRIGGER_NONE = 0;
TRIGGER_HIGH = 1;
TRIGGER_LOW = 2;
TRIGGER_RISE = 3;
TRIGGER_FALL = 4;
class SD_MarkerModes :
DISABLED = 0;
START = 1;
START_AFTER_DELAY = 2;
EVERY_CYCLE = 3;
class SD_TriggerValue :
LOW = 0;
HIGH = 1;
class SD_TriggerPolarity :
ACTIVE_LOW = 0;
ACTIVE_HIGH = 1;
class SD_SyncModes :
SYNC_NONE = 0;
SYNC_CLK10 = 1;
class SD_QueueMode :
ONE_SHOT = 0;
CYCLIC = 1;
class SD_ResetMode :
LOW = 0;
HIGH = 1;
PULSE = 2;
class SD_AddressingMode :
AUTOINCREMENT = 0;
FIXED = 1;
class SD_AccessMode :
NONDMA = 0;
DMA = 1;
class SD_FpgaTriggerDirection :
IN = 0;
INOUT = 1;
class SD_TriggerModes :
AUTOTRIG = 0;
VIHVITRIG = 1;
SWHVITRIG = 1;
EXTTRIG = 2;
HWDIGTRIG = 2;
HWANATRIG = 3;
SWHVITRIG_CYCLE = 5;
EXTTRIG_CYCLE = 6;
ANALOGAUTOTRIG = 11;
class SD_TriggerExternalSources :
TRIGGER_EXTERN = 0;
TRIGGER_PXI = 4000;
TRIGGER_PXI0 = 4000;
TRIGGER_PXI1 = 4001;
TRIGGER_PXI2 = 4002;
TRIGGER_PXI3 = 4003;
TRIGGER_PXI4 = 4004;
TRIGGER_PXI5 = 4005;
TRIGGER_PXI6 = 4006;
TRIGGER_PXI7 = 4007;
class SD_IOdirections :
DIR_IN = 0;
DIR_OUT = 1;
class SD_PinDirections :
DIR_IN = 0;
DIR_OUT = 1;
class SD_Strobe :
STROBE_OFF = 0;
STROBE_ON = 1;
    STROBE_LEVEL = 2;  # 0b10
STROBE_EDGERISE = 1;
STROBE_EDGEFALL = 0;
class SD_DebouncingTypes :
DEBOUNCING_NONE = 0;
    DEBOUNCING_LOW = 2;  # 0b10
    DEBOUNCING_HIGH = 3;  # 0b11
class SD_WindowTypes :
RECTANGULAR = 0;
BARTLETT = 1;
HANNING = 2;
HAMMING = 3;
BLACKMAN = 4;
class SD_Compatibility :
LEGACY = 0;
KEYSIGHT = 1;
class SD_Wave(SD_Object) :
PADDING_ZERO = 0;
PADDING_REPEAT = 1;
def newFromFile(self, waveformFile) :
self._SD_Object__handle = self._SD_Object__core_dll.SD_Wave_newFromFile(waveformFile.encode());
return self._SD_Object__handle;
def __del__(self):
self._SD_Object__core_dll.SD_Wave_delete(self._SD_Object__handle)
def newFromArrayDouble(self, waveformType, waveformDataA, waveformDataB = None):
if len(waveformDataA) > 0 and (waveformDataB is None or len(waveformDataA) == len(waveformDataB)):
dataA_np = to_numpy_float(waveformDataA)
waveform_dataA_C = dataA_np.ctypes.data_as(POINTER(c_double*len(dataA_np))).contents
if waveformDataB is None:
waveform_dataB_C = c_void_p(0)
else:
dataB_np = to_numpy_float(waveformDataB)
waveform_dataB_C = dataB_np.ctypes.data_as(POINTER(c_double*len(dataB_np))).contents
self._SD_Object__handle = self._SD_Object__core_dll.SD_Wave_newFromArrayDouble(waveformType, waveform_dataA_C._length_, waveform_dataA_C, waveform_dataB_C)
return self._SD_Object__handle
else :
self._SD_Object__handle = 0
return SD_Error.INVALID_VALUE
def newFromArrayDoubleNP(self, waveformType, waveformDataA, waveformDataB = None):
return self.newFromArrayDouble(waveformType, waveformDataA, waveformDataB)
def newFromArrayInteger(self, waveformType, waveformDataA, waveformDataB = None):
if len(waveformDataA) > 0 and (waveformDataB is None or len(waveformDataA) == len(waveformDataB)) :
dataA_np = to_numpy_int32(waveformDataA)
waveform_dataA_C = dataA_np.ctypes.data_as(POINTER(c_int32 * len(dataA_np))).contents
if waveformDataB is None:
waveform_dataB_C = c_void_p(0);
else :
dataB_np = to_numpy_int32(waveformDataB)
waveform_dataB_C = dataB_np.ctypes.data_as(POINTER(c_int32 * len(dataB_np))).contents
self._SD_Object__handle = self._SD_Object__core_dll.SD_Wave_newFromArrayInteger(waveformType, waveform_dataA_C._length_, waveform_dataA_C, waveform_dataB_C);
return self._SD_Object__handle;
else :
self._SD_Object__handle = 0;
return SD_Error.INVALID_VALUE
def getStatus(self) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Wave_getStatus(self._SD_Object__handle);
else :
return SD_Error.CREATING_WAVE;
def getType(self) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Wave_getType(self._SD_Object__handle);
else :
return SD_Error.CREATING_WAVE;
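# Usage sketch (no hardware is needed to build the object, but the SD1 core
# library must be installed for the DLL calls to resolve):
#
#   wave = SD_Wave()
#   handle = wave.newFromArrayDouble(SD_WaveformTypes.WAVE_ANALOG,
#                                    [0.0, 0.5, 1.0, 0.5, 0.0])
#   if handle <= 0:
#       print(SD_Error.getErrorMessage(handle))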
class SD_SandBoxRegister(SD_Object):
def __init__(self, moduleId, registerId):
self._SD_Object__handle = moduleId
self._SD_Register_Id = registerId
        [self.Address, self.Length, self.AccessType, self.Name] = self.__getRegisterInfo()
def __getRegisterInfo(self) :
if self._SD_Object__handle > 0 :
address = c_int(0)
length = c_int(0)
name = ''.rjust(100, '\0').encode();
accessType = ''.rjust(20, '\0').encode();
error = self._SD_Object__core_dll.SD_Module_FPGAgetRegisterInfo(self._SD_Object__handle, self._SD_Register_Id, byref(address),byref(length),accessType, name);
if error < 0 :
return error
else :
return [address.value,length.value, accessType.decode(), name.decode()]
else :
return SD_Error.MODULE_NOT_OPENED;
def readRegisterBuffer(self, indexOffset, bufferSize, addressMode, accessMode) :
if self._SD_Object__handle > 0 :
if bufferSize > 0 :
bufferSize = int(bufferSize)
data = (c_int * bufferSize)()
error = self._SD_Object__core_dll.SD_Module_FPGAreadRegisterBuffer(self._SD_Object__handle, self._SD_Register_Id,indexOffset, data, bufferSize, addressMode, accessMode)
if error < 0 :
return error
else :
return np.array(cast(data, POINTER(c_int*bufferSize)).contents)
else :
return SD_Error.INVALID_VALUE
else :
return SD_Error.MODULE_NOT_OPENED
def writeRegisterBuffer(self, indexOffset, buffer, addressMode, accessMode) :
if self._SD_Object__handle > 0 :
if len(buffer) > 100 :
# for long arrays conversion via numpy is much faster.
data_np = to_numpy_uint32(buffer)
data_C = data_np.ctypes.data_as(POINTER(c_int32 * len(data_np))).contents
return self._SD_Object__core_dll.SD_Module_FPGAwriteRegisterBuffer(
self._SD_Object__handle,
self._SD_Register_Id,
indexOffset,
data_C,
len(buffer),
addressMode,
accessMode);
elif len(buffer) > 0 :
data = (c_int * len(buffer))(*buffer);
return self._SD_Object__core_dll.SD_Module_FPGAwriteRegisterBuffer(self._SD_Object__handle, self._SD_Register_Id, indexOffset, data, data._length_, addressMode, accessMode);
else :
return SD_Error.INVALID_VALUE;
else :
return SD_Error.MODULE_NOT_OPENED;
def writeRegisterInt32(self, data) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_FPGAwriteRegisterInt32(self._SD_Object__handle, self._SD_Register_Id, data);
else :
return SD_Error.MODULE_NOT_OPENED;
def readRegisterInt32(self) :
if self._SD_Object__handle > 0 :
data = c_int(0)
error = self._SD_Object__core_dll.SD_Module_FPGAreadRegisterInt32(self._SD_Object__handle, self._SD_Register_Id, byref(data));
if error < 0 :
return error
else :
return data.value
else :
return SD_Error.MODULE_NOT_OPENED;
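# Register-access sketch (assumes an opened module; the register name
# 'Register_Bank_hvi_value' is purely illustrative). FPGAgetSandBoxRegister,
# defined on SD_Module below, returns either an SD_SandBoxRegister or a
# negative error code:
#
#   reg = module.FPGAgetSandBoxRegister('Register_Bank_hvi_value')
#   if isinstance(reg, SD_SandBoxRegister):
#       reg.writeRegisterInt32(42)
#       print(reg.readRegisterInt32())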
class SD_Module(SD_Object) :
def FPGAGetSandBoxKernelUUID(self) :
version = ''.rjust(37, '\0').encode();
if self._SD_Object__handle > 0 :
retValue = self._SD_Object__core_dll.SD_Module_FPGAgetKernelUUID(self._SD_Object__handle, version);
if retValue >= 0 :
return version.decode();
else :
return retValue;
else :
return SD_Error.MODULE_NOT_OPENED;
def openWithSerialNumber(self, partNumber, serialNumber) :
if self._SD_Object__handle <= 0 :
self._SD_Object__handle = self._SD_Object__core_dll.SD_Module_openWithSerialNumber(partNumber.encode(), serialNumber.encode())
if self._SD_Object__handle >= 0 and self.isHvi2Module():
self.createHvi()
return self._SD_Object__handle
def openWithSerialNumberCompatibility(self, partNumber, serialNumber, compatibility) :
if self._SD_Object__handle <= 0 :
self._SD_Object__handle = self._SD_Object__core_dll.SD_Module_openWithSerialNumberCompatibility(partNumber.encode(), serialNumber.encode(), compatibility);
if self._SD_Object__handle >= 0 and self.isHvi2Module():
self.createHvi()
return self._SD_Object__handle
def openWithSlot(self, partNumber, nChassis, nSlot) :
if self._SD_Object__handle <= 0 :
self._SD_Object__handle = self._SD_Object__core_dll.SD_Module_openWithSlot(partNumber.encode(), nChassis, nSlot)
if self._SD_Object__handle >= 0 and self.isHvi2Module():
self.createHvi()
return self._SD_Object__handle
def openWithOptions(self, partNumber, nChassis, nSlot, options) :
if self._SD_Object__handle <= 0 :
self._SD_Object__handle = self._SD_Object__core_dll.SD_Module_openWithOptions(partNumber.encode(), nChassis, nSlot, options.encode())
if self._SD_Object__handle >= 0 and self.isHvi2Module():
self.createHvi()
return self._SD_Object__handle
def openWithSlotCompatibility(self, partNumber, nChassis, nSlot, compatibility):
if self._SD_Object__handle <= 0:
self._SD_Object__handle = self._SD_Object__core_dll.SD_Module_openWithSlotCompatibility(partNumber.encode(), nChassis, nSlot, compatibility);
if self._SD_Object__handle >= 0 and self.isHvi2Module():
self.createHvi()
return self._SD_Object__handle
def close(self) :
if self._SD_Object__handle > 0 :
self._SD_Object__handle = self._SD_Object__core_dll.SD_Module_close(self._SD_Object__handle)
return self._SD_Object__handle
def isOpen(self) :
return self._SD_Object__handle > 0
def getType(self) :
objectType = self._SD_Object__core_dll.SD_Module_getType(self._SD_Object__handle)
if objectType < SD_Object_Type.AOU or objectType > SD_Object_Type.AIO or objectType == SD_Object_Type.WAVE:
objectType = SD_Error.INVALID_MODULEID
return objectType
@classmethod
def moduleCount(cls):
return cls._SD_Object__core_dll.SD_Module_count();
@classmethod
def getProductNameBySlot(cls, chassis, slot) :
buffer = ''.rjust(50, '\0').encode();
error = cls._SD_Object__core_dll.SD_Module_getProductNameBySlot(chassis, slot, buffer);
if error < 0 :
return error;
else :
return cls._SD_Object__formatString(buffer);
@classmethod
def getProductNameByIndex(cls, index) :
buffer = ''.rjust(50, '\0').encode();
error = cls._SD_Object__core_dll.SD_Module_getProductNameByIndex(index, buffer);
if error < 0 :
return error;
else :
return cls._SD_Object__formatString(buffer);
@classmethod
def getSerialNumberBySlot(cls, chassis, slot) :
buffer = ''.rjust(50, '\0').encode();
error = cls._SD_Object__core_dll.SD_Module_getSerialNumberBySlot(chassis, slot, buffer);
if error < 0 :
return error;
else :
return cls._SD_Object__formatString(buffer);
@classmethod
def getSerialNumberByIndex(cls, index) :
buffer = ''.rjust(50, '\0').encode();
error = cls._SD_Object__core_dll.SD_Module_getSerialNumberByIndex(index, buffer);
if error < 0 :
return error;
else :
return cls._SD_Object__formatString(buffer);
@classmethod
def getTypeBySlot(cls, chassis, slot) :
return cls._SD_Object__core_dll.SD_Module_getTypeBySlot(chassis, slot);
@classmethod
def getTypeByIndex(cls, index) :
return cls._SD_Object__core_dll.SD_Module_getTypeByIndex(index);
@classmethod
def getChassisByIndex(cls, index) :
return cls._SD_Object__core_dll.SD_Module_getChassisByIndex(index);
@classmethod
def getSlotByIndex(cls, index) :
return cls._SD_Object__core_dll.SD_Module_getSlotByIndex(index);
def runSelfTest(self) :
result = 0;
if self._SD_Object__handle > 0 :
result = self._SD_Object__core_dll.SD_Module_runSelfTest(self._SD_Object__handle);
else :
result = SD_Error.MODULE_NOT_OPENED;
return result;
def getSerialNumber(self) :
serial = ''.rjust(50, '\0').encode();
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_Module_getSerialNumber.restype = c_char_p;
return self._SD_Object__core_dll.SD_Module_getSerialNumber(self._SD_Object__handle, serial).decode();
else :
return SD_Error.MODULE_NOT_OPENED;
def getProductName(self) :
product = ''.rjust(50, '\0').encode();
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_Module_getProductName.restype = c_char_p;
return self._SD_Object__core_dll.SD_Module_getProductName(self._SD_Object__handle, product).decode();
else :
return SD_Error.MODULE_NOT_OPENED;
def getFirmwareVersion(self) :
version = ''.rjust(9, '\0').encode();
if self._SD_Object__handle > 0 :
retValue = self._SD_Object__core_dll.SD_Module_getFirmwareVersion(self._SD_Object__handle, version);
if retValue >= 0 :
return version.decode();
else :
return retValue;
else :
return SD_Error.MODULE_NOT_OPENED;
def getHardwareVersion(self) :
version = ''.rjust(9, '\0').encode();
if self._SD_Object__handle > 0 :
retValue = self._SD_Object__core_dll.SD_Module_getHardwareVersion(self._SD_Object__handle, version);
if retValue >= 0 :
return version.decode();
else :
return retValue;
else :
return SD_Error.MODULE_NOT_OPENED;
def getChassis(self) :
result = 0;
if self._SD_Object__handle > 0 :
result = self._SD_Object__core_dll.SD_Module_getChassis(self._SD_Object__handle);
else :
result = SD_Error.MODULE_NOT_OPENED;
return result;
def getSlot(self) :
result = 0;
if self._SD_Object__handle > 0 :
result = self._SD_Object__core_dll.SD_Module_getSlot(self._SD_Object__handle);
else :
result = SD_Error.MODULE_NOT_OPENED;
return result;
def getTemperature(self) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_Module_getTemperature.restype = c_double;
result = self._SD_Object__core_dll.SD_Module_getTemperature(self._SD_Object__handle);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def getOptions(self, optionkey) :
varToFill = ''.rjust(200, '\0').encode();
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_Module_getOptions.restype = c_char_p;
result = self._SD_Object__core_dll.SD_Module_getOptions(self._SD_Object__handle,optionkey.encode(), varToFill, len(varToFill), self.getType()).decode();
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
## PXItrigger
def PXItriggerWrite(self, trigger, value) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_PXItriggerWrite(self._SD_Object__handle, trigger, value);
else :
return SD_Error.MODULE_NOT_OPENED;
def PXItriggerRead(self, trigger) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_PXItriggerRead(self._SD_Object__handle, trigger);
else :
return SD_Error.MODULE_NOT_OPENED;
## External Trigger Lines
def translateTriggerPXItoExternalTriggerLine(self, trigger) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_translateTriggerPXItoExternalTriggerLine(self._SD_Object__handle, trigger);
else :
return SD_Error.MODULE_NOT_OPENED;
def translateTriggerIOtoExternalTriggerLine(self, trigger) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_translateTriggerIOtoExternalTriggerLine(self._SD_Object__handle, trigger);
else :
return SD_Error.MODULE_NOT_OPENED;
## FPGA
def FPGAload(self, fileName) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_FPGAload(self._SD_Object__handle, fileName.encode());
else :
return SD_Error.MODULE_NOT_OPENED;
def FPGAconfigureFromK7z(self, fileName) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_FPGAconfigureFromK7z(self._SD_Object__handle, fileName.encode());
else :
return SD_Error.MODULE_NOT_OPENED;
def FPGAreset(self, mode) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_FPGAreset(self._SD_Object__handle, mode);
else :
return SD_Error.MODULE_NOT_OPENED;
def FPGAgetSandBoxRegister(self, registerName):
if self._SD_Object__handle > 0 :
id = self._SD_Object__core_dll.SD_Module_FPGAgetRegisterId(self._SD_Object__handle, registerName.encode());
if id < 0 :
return id
else :
return SD_SandBoxRegister(self._SD_Object__handle,id )
else :
return SD_Error.MODULE_NOT_OPENED;
def FPGAgetSandBoxRegisters(self, count):
if self._SD_Object__handle > 0 :
count = int(count)
data = (c_int * count)()
error = self._SD_Object__core_dll.SD_Module_FPGAgetRegisterIds(self._SD_Object__handle, data, count);
if error < 0 :
return error
else :
registers = []
for id in np.array(cast(data, POINTER(c_int*count)).contents):
id = int(id)
registers.append(SD_SandBoxRegister(self._SD_Object__handle,id ))
return registers
else :
return SD_Error.MODULE_NOT_OPENED;
def FPGATriggerConfig(self, externalSource, direction, polarity, syncMode, delay5Tclk) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_FPGATriggerConfig(self._SD_Object__handle, externalSource, direction, polarity, syncMode, delay5Tclk);
else :
return SD_Error.MODULE_NOT_OPENED;
# HVI2
def isHvi2Module(self):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_Module_isHvi2Module(self._SD_Object__handle)
else :
return SD_Error.MODULE_NOT_OPENED
@abstractmethod
def createHvi(self):
pass
def getHviEngineUid(self, engineId):
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_Module_getHviEngineUid.restype = c_longlong;
return self._SD_Object__core_dll.SD_Module_getHviEngineUid(self._SD_Object__handle, engineId)
else :
return SD_Error.MODULE_NOT_OPENED
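# Enumeration/open sketch (requires the SD1 runtime; the part, chassis and slot
# numbers are placeholders, not a recommendation):
#
#   n = SD_Module.moduleCount()
#   for i in range(n):
#       print(SD_Module.getProductNameByIndex(i),
#             SD_Module.getSerialNumberByIndex(i),
#             'chassis', SD_Module.getChassisByIndex(i),
#             'slot', SD_Module.getSlotByIndex(i))
#   awg = SD_AOU()                      # SD_AOU is defined further below
#   awg.openWithSlot('M3202A', 1, 2)    # part number is an example only
#   awg.close()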
class Engine:
__kMasterEngineId = 0
def __init__(self, module):
self.__module = module
@property
def main_engine(self):
return self.__module.getHviEngineUid(self.__kMasterEngineId)
class TriggerModule:
@property
def pxi_0(self):
return 0
@property
def pxi_1(self):
return 1
@property
def pxi_2(self):
return 2
@property
def pxi_3(self):
return 3
@property
def pxi_4(self):
return 4
@property
def pxi_5(self):
return 5
@property
def pxi_6(self):
return 6
@property
def pxi_7(self):
return 7
@property
def front_panel_1(self):
return 8
class TriggerAIO(TriggerModule):
@property
def front_panel_2(self):
return 9
class ActionAwg:
def __init__(self, module):
self.__module = module
@property
def ch1_reset_phase(self):
return self.__module.getAction(constants.SD_AOU_Action_CH1ResetPhase)
@property
def ch2_reset_phase(self):
return self.__module.getAction(constants.SD_AOU_Action_CH2ResetPhase)
@property
def ch3_reset_phase(self):
return self.__module.getAction(constants.SD_AOU_Action_CH3ResetPhase)
@property
def ch4_reset_phase(self):
return self.__module.getAction(constants.SD_AOU_Action_CH4ResetPhase)
@property
def awg1_start(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG1Start)
@property
def awg2_start(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG2Start)
@property
def awg3_start(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG3Start)
@property
def awg4_start(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG4Start)
@property
def awg1_stop(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG1Stop)
@property
def awg2_stop(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG2Stop)
@property
def awg3_stop(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG3Stop)
@property
def awg4_stop(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG4Stop)
@property
def awg1_pause(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG1Pause)
@property
def awg2_pause(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG2Pause)
@property
def awg3_pause(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG3Pause)
@property
def awg4_pause(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG4Pause)
@property
def awg1_resume(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG1Resume)
@property
def awg2_resume(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG2Resume)
@property
def awg3_resume(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG3Resume)
@property
def awg4_resume(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG4Resume)
@property
def awg1_trigger(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG1Trigger)
@property
def awg2_trigger(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG2Trigger)
@property
def awg3_trigger(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG3Trigger)
@property
def awg4_trigger(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG4Trigger)
@property
def awg1_jump_next_waveform(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG1JumpNextWaveform)
@property
def awg2_jump_next_waveform(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG2JumpNextWaveform)
@property
def awg3_jump_next_waveform(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG3JumpNextWaveform)
@property
def awg4_jump_next_waveform(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG4JumpNextWaveform)
@property
def awg1_queue_flush(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG1QueueFlush)
@property
def awg2_queue_flush(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG2QueueFlush)
@property
def awg3_queue_flush(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG3QueueFlush)
@property
def awg4_queue_flush(self):
return self.__module.getAction(constants.SD_AOU_Action_AWG4QueueFlush)
@property
def fpga_user_0(self):
return self.__module.getAction(constants.SD_AOU_Action_UserFpga0)
@property
def fpga_user_1(self):
return self.__module.getAction(constants.SD_AOU_Action_UserFpga1)
@property
def fpga_user_2(self):
return self.__module.getAction(constants.SD_AOU_Action_UserFpga2)
@property
def fpga_user_3(self):
return self.__module.getAction(constants.SD_AOU_Action_UserFpga3)
@property
def fpga_user_4(self):
return self.__module.getAction(constants.SD_AOU_Action_UserFpga4)
@property
def fpga_user_5(self):
return self.__module.getAction(constants.SD_AOU_Action_UserFpga5)
@property
def fpga_user_6(self):
return self.__module.getAction(constants.SD_AOU_Action_UserFpga6)
@property
def fpga_user_7(self):
return self.__module.getAction(constants.SD_AOU_Action_UserFpga7)
class Event:
def __init__(self, module):
self.__module = module
@property
def awg1_queue_empty(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1QueueEmpty)
@property
def awg2_queue_empty(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2QueueEmpty)
@property
def awg3_queue_empty(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3QueueEmpty)
@property
def awg4_queue_empty(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4QueueEmpty)
@property
def awg1_queue_full(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1QueueFull)
@property
def awg2_queue_full(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2QueueFull)
@property
def awg3_queue_full(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3QueueFull)
@property
def awg4_queue_full(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4QueueFull)
@property
def awg1_underrun(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1Underrun)
@property
def awg2_underrun(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2Underrun)
@property
def awg3_underrun(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3Underrun)
@property
def awg4_underrun(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4Underrun)
@property
def awg1_queue_end(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1QueueEnd)
@property
def awg2_queue_end(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2QueueEnd)
@property
def awg3_queue_end(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3QueueEnd)
@property
def awg4_queue_end(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4QueueEnd)
@property
def awg1_waveform_start(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1WfStart)
@property
def awg2_waveform_start(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2WfStart)
@property
def awg3_waveform_start(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3WfStart)
@property
def awg4_waveform_start(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4WfStart)
@property
def awg1_queue_marker(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1QueueMarker)
@property
def awg2_queue_marker(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2QueueMarker)
@property
def awg3_queue_marker(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3QueueMarker)
@property
def awg4_queue_marker(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4QueueMarker)
@property
def awg1_queue_flushed(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1QueueFlushed)
@property
def awg2_queue_flushed(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2QueueFlushed)
@property
def awg3_queue_flushed(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3QueueFlushed)
@property
def awg4_queue_flushed(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4QueueFlushed)
@property
def awg1_queue_running(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1QueueRunning)
@property
def awg2_queue_running(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2QueueRunning)
@property
def awg3_queue_running(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3QueueRunning)
@property
def awg4_queue_running(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4QueueRunning)
@property
def fpga_user_0(self):
return self.__module.getEvent(constants.SD_AOU_Event_UserFpgaLoopback0)
@property
def fpga_user_1(self):
return self.__module.getEvent(constants.SD_AOU_Event_UserFpgaLoopback1)
@property
def fpga_user_2(self):
return self.__module.getEvent(constants.SD_AOU_Event_UserFpgaLoopback2)
@property
def fpga_user_3(self):
return self.__module.getEvent(constants.SD_AOU_Event_UserFpgaLoopback3)
@property
def fpga_user_4(self):
return self.__module.getEvent(constants.SD_AOU_Event_UserFpgaLoopback4)
@property
def fpga_user_5(self):
return self.__module.getEvent(constants.SD_AOU_Event_UserFpgaLoopback5)
@property
def fpga_user_6(self):
return self.__module.getEvent(constants.SD_AOU_Event_UserFpgaLoopback6)
@property
def fpga_user_7(self):
return self.__module.getEvent(constants.SD_AOU_Event_UserFpgaLoopback7)
@property
def awg1_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG1TriggerLoopback)
@property
def awg2_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG2TriggerLoopback)
@property
def awg3_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG3TriggerLoopback)
@property
def awg4_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AOU_Event_AWG4TriggerLoopback)
class InstructionParameter:
def __init__(self, module, attributeId):
self.__module = module
self.__attributeId = attributeId
@property
def id(self):
return self.__module.getAttributeId64(self.__attributeId)
class SetAmplitudeInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetAmplitude_Parameters_Channel_Id)
self.__value = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetAmplitude_Parameters_Value_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetAmplitude_Id)
@property
def channel(self):
return self.__channel
@property
def value(self):
return self.__value
class WaveShapeValue:
def __init__(self, module):
self.__module = module
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_Id)
@property
def HIZ(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_AOU_HIZ_Id)
@property
def AOU_OFF(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_AOU_OFF_Id)
@property
def AOU_SINUSOIDAL(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_AOU_SINUSOIDAL_Id)
@property
def AOU_TRIANGULAR(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_AOU_TRIANGULAR_Id)
@property
def AOU_SQUARE(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_AOU_SQUARE_Id)
@property
def AOU_DC(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_AOU_DC_Id)
@property
def AOU_AWG(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_AOU_AWG_Id)
@property
def AOU_PARTNER_CHANNEL(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Value_AOU_PARTNER_Id)
class SetWaveshapeInstruction:
def __init__(self,module):
self.__module = module
self.__value = WaveShapeValue(module)
self.__channel = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetWaveshape_Parameters_Channel_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetWaveshape_Id)
@property
def channel(self):
return self.__channel
@property
def value(self):
return self.__value
class SetOffsetInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetOffset_Parameters_Channel_Id)
self.__value = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetOffset_Parameters_Value_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetOffset_Id)
@property
def channel(self):
return self.__channel
@property
def value(self):
return self.__value
class SetFrequencyInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetFrequency_Parameters_Channel_Id)
self.__value = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetFrequency_Parameters_Value_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetFrequency_Id)
@property
def channel(self):
return self.__channel
@property
def value(self):
return self.__value
class SetPhaseInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetPhase_Parameters_Channel_Id)
self.__value = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_SetPhase_Parameters_Value_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_SetPhase_Id)
@property
def channel(self):
return self.__channel
@property
def value(self):
return self.__value
class ModeType:
def __init__(self, module):
self.__module = module
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_ModType_Id)
@property
def AOU_MOD_OFF(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_ModType_AOU_MOD_OFF_Id)
@property
def AOU_MOD_FM(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_ModType_AOU_MOD_FM_Id)
@property
def AOU_MOD_PM(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_ModType_AOU_MOD_PHASE_Id)
class ModulationFreqPhaseConfigInstruction:
def __init__(self,module):
self.__module = module
self.__modetype = ModeType(module)
self.__channel = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_Channel_Id)
self.__devgain = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_DevGain_Id )
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Id)
@property
def channel(self):
return self.__channel
@property
def modulation_type(self):
return self.__modetype
@property
def deviation_gain(self):
return self.__devgain
class ModulationAmpConfigModeType:
def __init__(self, module):
self.__module = module
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationAmpOffsetConfig_Parameters_ModType_Id)
@property
def AOU_MOD_OFF(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_ModType_AOU_MOD_OFF_Id)
@property
def AOU_MOD_AM(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_ModType_AOU_MOD_AM_Id)
@property
def AOU_MOD_OFFSET(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationFreqPhaseConfig_Parameters_ModType_AOU_MOD_OFFSET_Id)
class ModulationAmpOffsetConfigInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_ModulationAmpOffsetConfig_Parameters_Channel_Id)
self.__modetype = ModulationAmpConfigModeType(module)
self.__gain = InstructionParameter(module, constants.SD_AOU_Hvi_Instructions_ModulationAmpOffsetConfig_Parameters_Gain_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_ModulationAmpOffsetConfig_Id)
@property
def channel(self):
return self.__channel
@property
def modulation_type(self):
return self.__modetype
@property
def deviation_gain(self):
return self.__gain
class TriggerMode:
def __init__(self, module):
self.__module = module
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_TriggerMode_Id)
@property
def AUTOTRIG(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_TriggerMode_AUTOTRIG_Id)
@property
def SWHVITRIG(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_TriggerMode_SWHVITRIG_Id)
@property
def EXTTRIG(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_TriggerMode_EXTTRIG_Id)
@property
def SWHVITRIG_CYCLE(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_TriggerMode_SWHVITRIG_CYCLE_Id)
@property
def EXTTRIG_CYCLE(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_TriggerMode_EXTTRIG_CYCLE_Id)
class QueueWaveformInstruction:
def __init__(self,module):
self.__module = module
self.__triggerMode = TriggerMode(module)
self.__channel = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_Channel_Id)
self.__waveform = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_WaveformId_Id)
self.__cycles = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_Cycles_Id)
self.__startdelay = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_StartDelay_Id)
self.__prescaler = InstructionParameter(module,constants.SD_AOU_Hvi_Instructions_QueueWaveform_Parameters_Prescaler_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AOU_Hvi_Instructions_QueueWaveform_Id)
@property
def channel(self):
return self.__channel
@property
def waveform_number(self):
return self.__waveform
@property
def cycles(self):
return self.__cycles
@property
def start_delay(self):
return self.__startdelay
@property
def prescaler(self):
return self.__prescaler
@property
def trigger_mode(self):
return self.__triggerMode
class InstructionAWG:
def __init__(self, module):
self.__module = module
self.__setamplitude = SetAmplitudeInstruction(module)
self.__setwaveshape = SetWaveshapeInstruction(module)
self.__setoffset = SetOffsetInstruction(module)
self.__setfrequency = SetFrequencyInstruction(module)
self.__setphase = SetPhaseInstruction(module)
self.__modulationfreqphaseconfig = ModulationFreqPhaseConfigInstruction(module)
self.__modulationampoffsetconfig = ModulationAmpOffsetConfigInstruction(module)
self.__queuewaveform = QueueWaveformInstruction(module)
@property
def set_amplitude(self):
return self.__setamplitude
@property
def set_waveshape(self):
return self.__setwaveshape
@property
def set_offset(self):
return self.__setoffset
@property
def set_frequency(self):
return self.__setfrequency
@property
def set_phase(self):
return self.__setphase
@property
def modulation_angle_config(self):
return self.__modulationfreqphaseconfig
@property
def modulation_amplitude_config(self):
return self.__modulationampoffsetconfig
@property
def queue_waveform(self):
return self.__queuewaveform
class SD_AOUHvi:
def __init__(self, module):
self.__module = module
self.__engines = Engine(module)
self.__triggers = TriggerModule()
self.__actions = ActionAwg(module)
self.__events = Event(module)
self.__instructions = InstructionAWG(module)
@property
def engines(self):
return self.__engines
@property
def triggers(self):
return self.__triggers
@property
def actions(self):
return self.__actions
@property
def events(self):
return self.__events
@property
def instruction_set(self):
return self.__instructions
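# HVI-access sketch (only meaningful on HVI2-capable modules, where createHvi
# wires up an SD_AOUHvi instance; attribute names mirror the properties above):
#
#   if awg.isHvi2Module():
#       qw = awg.hvi.instruction_set.queue_waveform
#       print(qw.id, qw.channel.id, qw.trigger_mode.AUTOTRIG)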
class SD_AOU(SD_Module):
def __init__(self):
super(SD_AOU, self).__init__()
self.__hvi = None
def createHvi(self):
self.__hvi = SD_AOUHvi(self)
@property
def hvi(self):
return self.__hvi
def AWGqueueIsFull(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGqueueIsFull(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGqueueIsEmpty(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGqueueIsEmpty(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGqueueRemaining(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGqueueRemaining(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def clockGetFrequency(self) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AOU_clockGetFrequency.restype = c_double;
result = self._SD_Object__core_dll.SD_AOU_clockGetFrequency(self._SD_Object__handle);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def clockGetSyncFrequency(self) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AOU_clockGetSyncFrequency.restype = c_double;
result = self._SD_Object__core_dll.SD_AOU_clockGetSyncFrequency(self._SD_Object__handle);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def clockSetFrequency(self, frequency, mode = 1) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AOU_clockSetFrequency.restype = c_double;
result = self._SD_Object__core_dll.SD_AOU_clockSetFrequency(self._SD_Object__handle, c_double(frequency), mode);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def clockResetPhase(self, triggerBehavior, triggerSource, skew = 0.0):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_clockResetPhase(self._SD_Object__handle, triggerBehavior, triggerSource, c_double(skew));
else :
return SD_Error.MODULE_NOT_OPENED;
def setDigitalFilterMode(self, mode):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_setDigitalFilterMode(self._SD_Object__handle, mode);
else :
return SD_Error.MODULE_NOT_OPENED;
def channelAmplitude(self, nChannel, amplitude):
if self._SD_Object__handle > 0:
return self._SD_Object__core_dll.SD_AOU_channelAmplitude(self._SD_Object__handle, nChannel, c_double(amplitude))
else :
return SD_Error.MODULE_NOT_OPENED;
def channelOffset(self, nChannel, offset) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_channelOffset(self._SD_Object__handle, nChannel, c_double(offset));
else :
return SD_Error.MODULE_NOT_OPENED;
def channelWaveShape(self, nChannel, waveShape) :
if self._SD_Object__handle > 0:
return self._SD_Object__core_dll.SD_AOU_channelWaveShape(self._SD_Object__handle, nChannel, waveShape);
else :
return SD_Error.MODULE_NOT_OPENED
def channelFrequency(self, nChannel, frequency) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AOU_channelFrequency.restype = c_double;
return self._SD_Object__core_dll.SD_AOU_channelFrequency(self._SD_Object__handle, nChannel, c_double(frequency));
else :
return SD_Error.MODULE_NOT_OPENED;
def channelPhase(self, nChannel, phase) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_channelPhase(self._SD_Object__handle, nChannel, c_double(phase));
else :
return SD_Error.MODULE_NOT_OPENED;
def channelPhaseReset(self, nChannel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_channelPhaseReset(self._SD_Object__handle, nChannel);
else :
return SD_Error.MODULE_NOT_OPENED;
def channelPhaseResetMultiple(self, channelMask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_channelPhaseResetMultiple(self._SD_Object__handle, channelMask);
else :
return SD_Error.MODULE_NOT_OPENED;
def modulationAngleConfig(self, nChannel, modulationType, deviationGain) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_modulationAngleConfig(self._SD_Object__handle, nChannel, modulationType, c_double(deviationGain));
else :
return SD_Error.MODULE_NOT_OPENED;
def modulationAmplitudeConfig(self, nChannel, modulationType, deviationGain) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_modulationAmplitudeConfig(self._SD_Object__handle, nChannel, modulationType, c_double(deviationGain));
else :
return SD_Error.MODULE_NOT_OPENED;
def modulationIQconfig(self, nChannel, enable) :
if self._SD_Object__handle > 0:
return self._SD_Object__core_dll.SD_AOU_modulationIQconfig(self._SD_Object__handle, nChannel, enable);
else :
return SD_Error.MODULE_NOT_OPENED;
def clockIOconfig(self, clockConfig) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_clockIOconfig(self._SD_Object__handle, clockConfig);
else :
return SD_Error.MODULE_NOT_OPENED;
def triggerIOconfig(self, direction) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_triggerIOconfig(self._SD_Object__handle, direction);
else :
return SD_Error.MODULE_NOT_OPENED;
def triggerIOwrite(self, value, syncMode = 1) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_triggerIOwrite(self._SD_Object__handle, value, syncMode);
else :
return SD_Error.MODULE_NOT_OPENED;
def triggerIOread(self) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_triggerIOread(self._SD_Object__handle);
else :
return SD_Error.MODULE_NOT_OPENED;
def waveformReLoad(self, waveformObject, waveformNumber, paddingMode = 0) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_waveformReLoad(self._SD_Object__handle, waveformObject._SD_Object__handle, waveformNumber, paddingMode);
else :
return SD_Error.MODULE_NOT_OPENED;
def waveformReLoadArrayInt16(self, waveformType, dataRaw, waveformNumber, paddingMode = 0) :
if self._SD_Object__handle > 0 :
if len(dataRaw) > 0 :
data_np = to_numpy_int16(dataRaw)
data_C = data_np.ctypes.data_as(POINTER(c_short*len(data_np))).contents
                return self._SD_Object__core_dll.SD_AOU_waveformReLoadArrayInt16(self._SD_Object__handle, waveformType, data_C._length_, data_C, waveformNumber, paddingMode);
else :
return SD_Error.INVALID_VALUE;
else :
return SD_Error.MODULE_NOT_OPENED;
def waveformLoad(self, waveformObject, waveformNumber, paddingMode = 0) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_waveformLoad(self._SD_Object__handle, waveformObject._SD_Object__handle, waveformNumber, paddingMode);
else :
return SD_Error.MODULE_NOT_OPENED;
def waveformLoadInt16(self, waveformType, dataRaw, waveformNumber, paddingMode = 0) :
if self._SD_Object__handle > 0 :
if len(dataRaw) > 0 :
data_np = to_numpy_int16(dataRaw)
data_C = data_np.ctypes.data_as(POINTER(c_short*len(data_np))).contents
                return self._SD_Object__core_dll.SD_AOU_waveformLoadArrayInt16(self._SD_Object__handle, waveformType, data_C._length_, data_C, waveformNumber, paddingMode);
else :
return SD_Error.INVALID_VALUE;
else :
return SD_Error.MODULE_NOT_OPENED;
def waveformAddToList(self, waveformObject, waveformNumber, paddingMode = 0) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_waveformAddToList(self._SD_Object__handle, waveformObject._SD_Object__handle, waveformNumber, paddingMode);
else :
return SD_Error.MODULE_NOT_OPENED;
def waveformListLoad(self) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_waveformListLoad(self._SD_Object__handle);
else :
return SD_Error.MODULE_NOT_OPENED;
def waveformFlush(self) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_waveformFlush(self._SD_Object__handle);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGqueueWaveform(self, nAWG, waveformNumber, triggerMode, startDelay, cycles, prescaler) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGqueueWaveform(self._SD_Object__handle, nAWG, waveformNumber, triggerMode, startDelay, cycles, prescaler);
else :
return SD_Error.MODULE_NOT_OPENED;
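    # Queue-and-start sketch (assumes 'awg' is an opened SD_AOU and waveform 0
    # was previously loaded with waveformLoad; constants come from the classes
    # defined earlier in this file):
    #
    #   awg.AWGflush(0)
    #   awg.AWGqueueWaveform(0, 0, SD_TriggerModes.AUTOTRIG,
    #                        startDelay=0, cycles=1, prescaler=0)
    #   awg.AWGstart(0)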
def AWGstartMultiple(self, AWGmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGstartMultiple(self._SD_Object__handle, AWGmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGstopMultiple(self, AWGmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGstopMultiple(self._SD_Object__handle, AWGmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGresumeMultiple(self, AWGmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGresumeMultiple(self._SD_Object__handle, AWGmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGpauseMultiple(self, AWGmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGpauseMultiple(self._SD_Object__handle, AWGmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGtriggerMultiple(self, AWGmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGtriggerMultiple(self._SD_Object__handle, AWGmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGjumpNextWaveformMultiple(self, AWGmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGjumpNextWaveformMultiple(self._SD_Object__handle, AWGmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGstart(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGstart(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGstop(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGstop(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGresume(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGresume(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGpause(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGpause(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGtrigger(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGtrigger(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGjumpNextWaveform(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGjumpNextWaveform(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGflush(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGflush(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGisRunning(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGisRunning(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGnWFplaying(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGnWFplaying(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def __AWGfromArrayInt(self, nAWG, triggerMode, startDelay, cycles, prescaler, waveformType, waveformDataA, waveformDataB, paddingMode) :
waveform_dataA_np = to_numpy_int32(waveformDataA)
waveform_dataA_C = waveform_dataA_np.ctypes.data_as(POINTER(c_int32*len(waveform_dataA))).contents
if waveformDataB is None:
waveform_dataB_C = c_void_p(0);
else :
waveform_dataB_np = to_numpy_int32(waveformDataB)
waveform_dataB_C = waveform_dataB_np.ctypes.data_as(POINTER(c_int32*len(waveform_dataB))).contents
return self._SD_Object__core_dll.SD_AOU_AWGfromArrayInteger(self._SD_Object__handle, nAWG, triggerMode, startDelay, cycles, prescaler, waveformType, waveform_dataA_C._length_, waveform_dataA_C, waveform_dataB_C, paddingMode)
def AWGfromArray(self, nAWG, triggerMode, startDelay, cycles, prescaler, waveformType, waveformDataA, waveformDataB = None, paddingMode = 0) :
if self._SD_Object__handle > 0 :
if len(waveformDataA) > 0 and (waveformDataB is None or len(waveformDataA) == len(waveformDataB)) :
if waveformType == SD_WaveformTypes.WAVE_DIGITAL :
return self.__AWGfromArrayInt(nAWG, triggerMode, startDelay, cycles, prescaler, waveformType, waveformDataA,waveformDataB, paddingMode)
else :
waveform_dataA_np = to_numpy_float(waveformDataA)
waveform_dataA_C = waveform_dataA_np.ctypes.data_as(POINTER(c_double*len(waveform_dataA))).contents
if waveformDataB is None:
waveform_dataB_C = c_void_p(0);
else :
waveform_dataB_np = to_numpy_float(waveformDataB)
waveform_dataB_C = waveform_dataB_np.ctypes.data_as(POINTER(c_double*len(waveform_dataB))).contents
return self._SD_Object__core_dll.SD_AOU_AWGfromArray(self._SD_Object__handle, nAWG, triggerMode, startDelay, cycles, prescaler, waveformType, waveform_dataA_C._length_, waveform_dataA_C, waveform_dataB_C, paddingMode)
else :
return SD_Error.INVALID_VALUE
else :
return SD_Error.MODULE_NOT_OPENED
def AWGFromFile(self, nAWG, waveformFile, triggerMode, startDelay, cycles, prescaler, paddingMode = 0) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGfromFile(self._SD_Object__handle, nAWG, waveformFile.encode(), triggerMode, startDelay, cycles, prescaler, paddingMode)
else :
return SD_Error.MODULE_NOT_OPENED
def AWGtriggerExternalConfig(self, nAWG, externalSource, triggerBehavior, sync = SD_SyncModes.SYNC_CLK10) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGtriggerExternalConfig(self._SD_Object__handle, nAWG, externalSource, triggerBehavior, sync);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGqueueConfig(self, nAWG, mode) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGqueueConfig(self._SD_Object__handle, nAWG, mode);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGqueueConfigRead(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGqueueConfigRead(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGfreezeOnStopEnable(self, nAWG, mode) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGfreezeOnStopEnable(self._SD_Object__handle, nAWG, mode);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGisFreezeOnStopEnabled(self, nAWG) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGisFreezeOnStopEnabled(self._SD_Object__handle, nAWG);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGqueueMarkerConfig(self, nAWG, markerMode, trgPXImask, trgIOmask, value, syncMode, length, delay) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGqueueMarkerConfig(self._SD_Object__handle, nAWG, markerMode, trgPXImask, trgIOmask, value, syncMode, length, delay);
else :
return SD_Error.MODULE_NOT_OPENED;
def AWGqueueSyncMode(self, nAWG, syncMode) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_AWGqueueSyncMode(self._SD_Object__handle, nAWG, syncMode);
else :
return SD_Error.MODULE_NOT_OPENED;
def voltsToInt(self, volts) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_voltsToInt(self._SD_Object__handle, c_double(volts));
else :
return SD_Error.MODULE_NOT_OPENED;
def freqToInt(self, freq) :
converted = c_longlong(0);
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AOU_freqToInt.restype = c_longlong;
converted = self._SD_Object__core_dll.SD_AOU_freqToInt(self._SD_Object__handle, c_double(freq));
return converted;
else :
return SD_Error.MODULE_NOT_OPENED;
def phaseToInt(self, phase) :
converted = c_longlong(0);
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AOU_phaseToInt.restype = c_longlong;
converted = self._SD_Object__core_dll.SD_AOU_phaseToInt(self._SD_Object__handle, c_double(phase));
return converted;
else :
return SD_Error.MODULE_NOT_OPENED;
def freqGainToInt(self, freqGain) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_freqGainToInt(self._SD_Object__handle, c_double(freqGain));
else :
return SD_Error.MODULE_NOT_OPENED;
def phaseGainToInt(self, phaseGain) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_phaseGainToInt(self._SD_Object__handle, c_double(phaseGain));
else :
return SD_Error.MODULE_NOT_OPENED;
def getAction(self, actionId):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_getAction(self._SD_Object__handle, actionId)
else :
return SD_Error.MODULE_NOT_OPENED
def getEvent(self, eventId):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AOU_getEvent(self._SD_Object__handle, eventId)
else :
return SD_Error.MODULE_NOT_OPENED
def getAttributeId64(self, attributeId):
if self._SD_Object__handle > 0 :
id = c_longlong(0);
self._SD_Object__core_dll.SD_Module_Hvi_getAttributeId64(self._SD_Object__handle, attributeId, byref(id))
return id.value
else :
return SD_Error.MODULE_NOT_OPENED
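# --- Usage sketch (not part of the driver) ----------------------------------
# A minimal example of chaining the SD_AOU AWG methods above. It assumes the
# module object was already opened elsewhere (the open call lives in SD_Module,
# outside this excerpt); the trigger-mode/waveform-type values are illustrative
# defaults, not the only valid ones. All calls return negative SD_Error codes
# on failure.
def _example_aou_play_ramp(aou, channel=0):
    ramp = np.linspace(-1.0, 1.0, 1000)      # normalized samples in [-1, 1]
    err = aou.AWGflush(channel)              # drop anything already queued
    if err < 0:
        return err
    # triggerMode=0, startDelay=0, cycles=1, prescaler=0, waveformType=0 (analog)
    err = aou.AWGfromArray(channel, 0, 0, 1, 0, 0, ramp)
    if err < 0:
        return err
    return aou.AWGstart(channel)             # begin playback on this AWG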
class SD_AIN_TriggerMode :
RISING_EDGE = 1;
FALLING_EDGE = 2;
BOTH_EDGES = 3;
class AIN_Coupling :
AIN_COUPLING_DC = 0;
AIN_COUPLING_AC = 1;
class AIN_Impedance :
AIN_IMPEDANCE_HZ = 0;
AIN_IMPEDANCE_50 = 1;
class ActionDig:
def __init__(self, module):
self.__module = module
@property
def daq1_start(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ1Start)
@property
def daq2_start(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ2Start)
@property
def daq3_start(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ3Start)
@property
def daq4_start(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ4Start)
@property
def daq1_stop(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ1Stop)
@property
def daq2_stop(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ2Stop)
@property
def daq3_stop(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ3Stop)
@property
def daq4_stop(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ4Stop)
@property
def daq1_resume(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ1Resume)
@property
def daq2_resume(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ2Resume)
@property
def daq3_resume(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ3Resume)
@property
def daq4_resume(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ4Resume)
@property
def daq1_trigger(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ1Trigger)
@property
def daq2_trigger(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ2Trigger)
@property
def daq3_trigger(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ3Trigger)
@property
def daq4_trigger(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ4Trigger)
@property
def daq1_flush(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ1Flush)
@property
def daq2_flush(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ2Flush)
@property
def daq3_flush(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ3Flush)
@property
def daq4_flush(self):
return self.__module.getAction(constants.SD_AIN_Action_DAQ4Flush)
@property
def fpga_user_0(self):
return self.__module.getAction(constants.SD_AIN_Action_UserFpga0)
@property
def fpga_user_1(self):
return self.__module.getAction(constants.SD_AIN_Action_UserFpga1)
@property
def fpga_user_2(self):
return self.__module.getAction(constants.SD_AIN_Action_UserFpga2)
@property
def fpga_user_3(self):
return self.__module.getAction(constants.SD_AIN_Action_UserFpga3)
@property
def fpga_user_4(self):
return self.__module.getAction(constants.SD_AIN_Action_UserFpga4)
@property
def fpga_user_5(self):
return self.__module.getAction(constants.SD_AIN_Action_UserFpga5)
@property
def fpga_user_6(self):
return self.__module.getAction(constants.SD_AIN_Action_UserFpga6)
@property
def fpga_user_7(self):
return self.__module.getAction(constants.SD_AIN_Action_UserFpga7)
class EventDig:
def __init__(self, module):
self.__module = module
@property
def daq1_empty(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ1Empty)
@property
def daq2_empty(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ2Empty)
@property
def daq3_empty(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ3Empty)
@property
def daq4_empty(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ4Empty)
@property
def daq1_running(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ1Running)
@property
def daq2_running(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ2Running)
@property
def daq3_running(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ3Running)
@property
def daq4_running(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ4Running)
@property
def daq1_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ1TriggerLoopback)
@property
def daq2_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ2TriggerLoopback)
@property
def daq3_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ3TriggerLoopback)
@property
def daq4_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIN_Event_DAQ4TriggerLoopback)
@property
def fpga_user_0(self):
return self.__module.getEvent(constants.SD_AIN_Event_UserFpgaLoopback0)
@property
def fpga_user_1(self):
return self.__module.getEvent(constants.SD_AIN_Event_UserFpgaLoopback1)
@property
def fpga_user_2(self):
return self.__module.getEvent(constants.SD_AIN_Event_UserFpgaLoopback2)
@property
def fpga_user_3(self):
return self.__module.getEvent(constants.SD_AIN_Event_UserFpgaLoopback3)
@property
def fpga_user_4(self):
return self.__module.getEvent(constants.SD_AIN_Event_UserFpgaLoopback4)
@property
def fpga_user_5(self):
return self.__module.getEvent(constants.SD_AIN_Event_UserFpgaLoopback5)
@property
def fpga_user_6(self):
return self.__module.getEvent(constants.SD_AIN_Event_UserFpgaLoopback6)
@property
def fpga_user_7(self):
return self.__module.getEvent(constants.SD_AIN_Event_UserFpgaLoopback7)
class TriggerModeDaqConfig:
def __init__(self, module):
self.__module = module
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_TriggerMode_Id)
@property
def AUTOTRIG(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_TriggerMode_AUTOTRIG_Id)
@property
def SWHVITRIG(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_TriggerMode_SWHVITRIG1_Id)
@property
def HWDIGTRIG(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_TriggerMode_HWDIGTRIG_Id)
@property
def HWANATRIG(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_TriggerMode_HWANATRIG_Id)
class DaqConfigInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_Channel_Id)
self.__cycles = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_Cycles_Id)
self.__pointspercycle = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_PointsPerCycle_Id)
self.__triggermode = TriggerModeDaqConfig(module)
self.__triggerdelay = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Parameters_TriggerDelay_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_DaqConfigInstruction_Id)
@property
def channel(self):
return self.__channel
@property
def cycles(self):
return self.__cycles
@property
def daq_points_per_cycle(self):
return self.__pointspercycle
@property
def trigger_delay(self):
return self.__triggerdelay
@property
def trigger_mode(self):
return self.__triggermode
class AnalogTrigModeChnlConfig:
def __init__(self, module):
self.__module = module
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_ChannelTriggerConfigInstruction_Parameters_AnalogTriggerMode_Id)
@property
def AIN_RISING_EDGE(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_ChannelTriggerConfigInstruction_Parameters_AnalogTriggerMode_AIN_RISING_EDGE_Id)
@property
def AIN_FALLING_EDGE(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_ChannelTriggerConfigInstruction_Parameters_AnalogTriggerMode_AIN_FALLING_EDGE_Id)
@property
def AIN_BOTH_EDGES(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_ChannelTriggerConfigInstruction_Parameters_AnalogTriggerMode_AIN_BOTH_EDGES_Id)
class ChannelTriggerConfigInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_ChannelTriggerConfigInstruction_Parameters_Channel_Id)
self.__analogtriggermode = AnalogTrigModeChnlConfig(module)
self.__threshold = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_ChannelTriggerConfigInstruction_Parameters_Threshold_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_ChannelTriggerConfigInstruction_Id)
@property
def channel(self):
return self.__channel
@property
def analog_trigger_mode(self):
return self.__analogtriggermode
@property
def threshold(self):
return self.__threshold
class DaqAnalogTriggerConfigInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_DaqAnalogTriggerConfigInstruction_Parameters_Channel_Id)
self.__analogtriggermask = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_DaqAnalogTriggerConfigInstruction_Parameters_AnalogTriggerMask_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_DaqAnalogTriggerConfigInstruction_Id)
@property
def channel(self):
return self.__channel
@property
def analog_trigger_mask(self):
return self.__analogtriggermask
class ChannelPrescalerConfigInstruction:
def __init__(self,module):
self.__module = module
self.__channel = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_ChannelPrescalerConfigInstruction_Parameters_Channel_Id)
self.__prescaler = InstructionParameter(module,constants.SD_AIN_Hvi_Instructions_ChannelPrescalerConfigInstruction_Parameters_Prescaler_Id)
@property
def id(self):
return self.__module.getAttributeId64(constants.SD_AIN_Hvi_Instructions_ChannelPrescalerConfigInstruction_Id)
@property
def channel(self):
return self.__channel
@property
def prescaler(self):
return self.__prescaler
class InstructionDIG:
def __init__(self, module):
self.__module = module
self.__daqconfig = DaqConfigInstruction(module)
self.__chnltriggerconfig = ChannelTriggerConfigInstruction(module)
self.__daqanalogtriggerconfig = DaqAnalogTriggerConfigInstruction(module)
self.__chnlprescalerconfig = ChannelPrescalerConfigInstruction(module)
@property
def daq_config(self):
return self.__daqconfig
@property
def channel_trigger_config(self):
return self.__chnltriggerconfig
@property
def daq_analog_trigger_config(self):
return self.__daqanalogtriggerconfig
@property
def channel_prescaler_config(self):
return self.__chnlprescalerconfig
class SD_AINHvi:
def __init__(self, module):
self.__module = module
self.__engines = Engine(module)
self.__triggers = TriggerModule()
self.__actions = ActionDig(module)
self.__events = EventDig(module)
self.__instructions = InstructionDIG(module)
@property
def engines(self):
return self.__engines
@property
def triggers(self):
return self.__triggers
@property
def actions(self):
return self.__actions
@property
def events(self):
return self.__events
@property
def instruction_set(self):
return self.__instructions
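# --- Usage sketch (not part of the driver) ----------------------------------
# How the HVI facade above is typically consumed: every leaf property simply
# forwards to getAction/getEvent/getAttributeId64 on the owning module, so the
# IDs are resolved lazily and are only meaningful once the SD_AIN module
# (defined next) has been opened. createHvi and hvi below are SD_AIN members.
def _example_ain_hvi_ids(ain):
    ain.createHvi()                              # builds the SD_AINHvi facade
    hvi = ain.hvi
    daq_starts = [hvi.actions.daq1_start, hvi.actions.daq2_start,
                  hvi.actions.daq3_start, hvi.actions.daq4_start]
    daq_cfg = hvi.instruction_set.daq_config
    # Instruction and parameter IDs all resolve through getAttributeId64:
    return daq_starts, daq_cfg.id, daq_cfg.trigger_mode.AUTOTRIG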
class SD_AIN(SD_Module) :
def __init__(self):
super(SD_AIN, self).__init__()
self.__hvi = None
def createHvi(self):
self.__hvi = SD_AINHvi(self)
@property
def hvi(self):
return self.__hvi
def getAttributeId64(self, attributeId):
if self._SD_Object__handle > 0 :
id = c_longlong(0);
self._SD_Object__core_dll.SD_Module_Hvi_getAttributeId64(self._SD_Object__handle, attributeId, byref(id))
return id.value
else :
return SD_Error.MODULE_NOT_OPENED
def voltsToInt(self, channel, volts) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_voltsToInt(self._SD_Object__handle, channel, c_double(volts));
else :
return SD_Error.MODULE_NOT_OPENED;
def channelInputConfig(self, channel, fullScale, impedance, coupling) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_channelInputConfig(self._SD_Object__handle, channel, c_double(fullScale), impedance, coupling);
else :
return SD_Error.MODULE_NOT_OPENED;
def channelPrescalerConfig(self, channel, prescaler) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_channelPrescalerConfig(self._SD_Object__handle, channel, prescaler);
else :
return SD_Error.MODULE_NOT_OPENED;
def channelPrescalerConfigMultiple(self, mask, prescaler) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_channelPrescalerConfigMultiple(self._SD_Object__handle, mask, prescaler);
else :
return SD_Error.MODULE_NOT_OPENED;
def channelPrescaler(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_channelPrescaler(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def channelFullScale(self, channel) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AIN_channelFullScale.restype = c_double;
result = self._SD_Object__core_dll.SD_AIN_channelFullScale(self._SD_Object__handle, channel);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def channelMinFullScale(self, impedance, coupling) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AIN_channelMinFullScale.restype = c_double;
result = self._SD_Object__core_dll.SD_AIN_channelMinFullScale(self._SD_Object__handle, impedance, coupling);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def channelMaxFullScale(self, impedance, coupling) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AIN_channelMaxFullScale.restype = c_double;
result = self._SD_Object__core_dll.SD_AIN_channelMaxFullScale(self._SD_Object__handle, impedance, coupling);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def channelImpedance(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_channelImpedance(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def channelCoupling(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_channelCoupling(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def channelTriggerConfig(self, channel, analogTriggerMode, threshold) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_channelTriggerConfig(self._SD_Object__handle, channel, analogTriggerMode, c_double(threshold));
else :
return SD_Error.MODULE_NOT_OPENED;
def clockIOconfig(self, clockConfig) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_clockIOconfig(self._SD_Object__handle, clockConfig);
else :
return SD_Error.MODULE_NOT_OPENED;
def clockGetFrequency(self) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AIN_clockGetFrequency.restype = c_double;
result = self._SD_Object__core_dll.SD_AIN_clockGetFrequency(self._SD_Object__handle);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def clockGetSyncFrequency(self) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AIN_clockGetSyncFrequency.restype = c_double;
result = self._SD_Object__core_dll.SD_AIN_clockGetSyncFrequency(self._SD_Object__handle);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def clockSetFrequency(self, frequency, mode = 1) :
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AIN_clockSetFrequency.restype = c_double;
result = self._SD_Object__core_dll.SD_AIN_clockSetFrequency(self._SD_Object__handle, c_double(frequency), mode);
if result < 0 :
return int(result);
else :
return result;
else :
return SD_Error.MODULE_NOT_OPENED;
def clockResetPhase(self, triggerBehavior, PXItrigger, skew = 0.0) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_clockResetPhase(self._SD_Object__handle, triggerBehavior, PXItrigger, c_double(skew));
else :
return SD_Error.MODULE_NOT_OPENED;
def triggerIOconfig(self, direction) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_triggerIOconfig(self._SD_Object__handle, direction);
else :
return SD_Error.MODULE_NOT_OPENED;
def triggerIOwrite(self, value, syncMode = 1) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_triggerIOwrite(self._SD_Object__handle, value, syncMode);
else :
return SD_Error.MODULE_NOT_OPENED;
def triggerIOread(self) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_triggerIOread(self._SD_Object__handle);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQconfig(self, channel, pointsPerCycle, nCycles, triggerDelay, triggerMode) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQconfig(self._SD_Object__handle, channel, pointsPerCycle, nCycles, triggerDelay, triggerMode);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQtriggerConfig(self, channel, digitalTriggerMode, digitalTriggerSource, analogTriggerMask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQtriggerConfig(self._SD_Object__handle, channel, digitalTriggerMode, digitalTriggerSource, analogTriggerMask);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQanalogTriggerConfig(self, channel, analogTriggerMask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQanalogTriggerConfig(self._SD_Object__handle, channel, analogTriggerMask);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQdigitalTriggerConfig(self, channel, triggerSource, triggerBehavior) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQdigitalTriggerConfig(self._SD_Object__handle, channel, triggerSource, triggerBehavior);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQtriggerExternalConfig(self, nDAQ, externalSource, triggerBehavior, sync = SD_SyncModes.SYNC_NONE) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQtriggerExternalConfig(self._SD_Object__handle, nDAQ, externalSource, triggerBehavior, sync);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQnPoints(self,nDAQ) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQnPoints(self._SD_Object__handle, nDAQ);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQstart(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQstart(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQpause(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQpause(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQresume(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQresume(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQstop(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQstop(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQflush(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQflush(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQtrigger(self, channel) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQtrigger(self._SD_Object__handle, channel);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQstartMultiple(self, DAQmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQstartMultiple(self._SD_Object__handle, DAQmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQpauseMultiple(self, DAQmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQpauseMultiple(self._SD_Object__handle, DAQmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQresumeMultiple(self, DAQmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQresumeMultiple(self._SD_Object__handle, DAQmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQstopMultiple(self, DAQmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQstopMultiple(self._SD_Object__handle, DAQmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQflushMultiple(self, DAQmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQflushMultiple(self._SD_Object__handle, DAQmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQtriggerMultiple(self, DAQmask) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQtriggerMultiple(self._SD_Object__handle, DAQmask);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQread(self, nDAQ, nPoints, timeOut = 0) :
if self._SD_Object__handle > 0 :
if nPoints > 0 :
data = (c_short * nPoints)()
nPointsOrError = self._SD_Object__core_dll.SD_AIN_DAQread(self._SD_Object__handle, nDAQ, data, nPoints, timeOut)
if nPointsOrError > 0 :
return np.array(cast(data, POINTER(c_short*nPointsOrError)).contents)
elif nPointsOrError < 0 :
return nPointsOrError
else :
return np.empty(0, dtype=np.short)
else :
return SD_Error.INVALID_VALUE
else :
return SD_Error.MODULE_NOT_OPENED
def DAQcounterRead(self, nDAQ) :
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQcounterRead(self._SD_Object__handle, nDAQ);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQbufferPoolConfig(self, nDAQ, nPoints, timeOut = 0):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQbufferPoolConfig(self._SD_Object__handle, nDAQ, c_void_p(0), nPoints, timeOut, c_void_p(0), c_void_p(0));
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQbufferPoolRelease(self, nDAQ):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_DAQbufferPoolRelease(self._SD_Object__handle, nDAQ);
else :
return SD_Error.MODULE_NOT_OPENED;
def DAQbufferGet(self, nDAQ):
if self._SD_Object__handle > 0 :
self._SD_Object__core_dll.SD_AIN_DAQbufferGet.restype = POINTER(c_short)
error = c_int32()
readPoints = c_int32()
data = self._SD_Object__core_dll.SD_AIN_DAQbufferGet(self._SD_Object__handle, nDAQ, byref(readPoints), byref(error))
error = error.value
if error < 0 :
return error
else :
nPoints = readPoints.value
if nPoints > 0 :
return np.ctypeslib.as_array((c_short*nPoints).from_address(addressof(data.contents)))
else :
return np.empty(0, dtype=np.short)
else :
return SD_Error.MODULE_NOT_OPENED
def FFT(self, channel, data, dB = False, windowType = 0) :
error = SD_Error.INVALID_PARAMETERS
if self._SD_Object__handle > 0 :
if data is not None :
size = len(data)
if size > 0 :
resultSize = int(ceil(pow(2, ceil(log(size, 2)))/2))
# dataC = (c_short * size)(*data)
data_np = to_numpy_int16(data)
dataC = data_np.ctypes.data_as(POINTER(c_short*len(data_np))).contents
moduleC = (c_double * resultSize)()
phaseC = (c_double * resultSize)()
resultSize = self._SD_Object__core_dll.SD_AIN_FFT(self._SD_Object__handle, channel, dataC, size, moduleC, resultSize, phaseC, dB, windowType)
if resultSize > 0 :
moduleData = np.array(moduleC)
phaseData = np.array(phaseC)
else :
moduleData = np.empty(0, dtype=np.double)
phaseData = np.empty(0, dtype=np.double)
return (moduleData, phaseData)
else :
error = SD_Error.MODULE_NOT_OPENED
return error
def getAction(self, actionId):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_getAction(self._SD_Object__handle, actionId)
else :
return SD_Error.MODULE_NOT_OPENED
def getEvent(self, eventId):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIN_getEvent(self._SD_Object__handle, eventId)
else :
return SD_Error.MODULE_NOT_OPENED
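# --- Usage sketch (not part of the driver) ----------------------------------
# A minimal single-shot capture using the SD_AIN methods above. The module is
# assumed to be open already; the full-scale, trigger-mode and timeout values
# are illustrative. DAQread returns a numpy array on success and a negative
# SD_Error code otherwise.
def _example_ain_capture(ain, channel=1, npoints=1000):
    err = ain.channelInputConfig(channel, 1.0,
                                 AIN_Impedance.AIN_IMPEDANCE_50,
                                 AIN_Coupling.AIN_COUPLING_DC)
    if err < 0:
        return err
    err = ain.DAQconfig(channel, npoints, 1, 0, 0)   # 1 cycle, no delay, auto trigger
    if err < 0:
        return err
    ain.DAQstart(channel)
    return ain.DAQread(channel, npoints, timeOut=1000)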
class ActionAio :
def __init__(self, module):
self.__module = module
@property
def ch1_reset_phase(self):
return self.__module.getAction(constants.SD_AIO_Action_CH1ResetPhase)
@property
def ch2_reset_phase(self):
return self.__module.getAction(constants.SD_AIO_Action_CH2ResetPhase)
@property
def ch3_reset_phase(self):
return self.__module.getAction(constants.SD_AIO_Action_CH3ResetPhase)
@property
def ch4_reset_phase(self):
return self.__module.getAction(constants.SD_AIO_Action_CH4ResetPhase)
@property
def awg1_start(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG1Start)
@property
def awg2_start(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG2Start)
@property
def awg3_start(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG3Start)
@property
def awg4_start(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG4Start)
@property
def awg1_stop(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG1Stop)
@property
def awg2_stop(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG2Stop)
@property
def awg3_stop(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG3Stop)
@property
def awg4_stop(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG4Stop)
@property
def awg1_pause(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG1Pause)
@property
def awg2_pause(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG2Pause)
@property
def awg3_pause(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG3Pause)
@property
def awg4_pause(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG4Pause)
@property
def awg1_resume(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG1Resume)
@property
def awg2_resume(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG2Resume)
@property
def awg3_resume(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG3Resume)
@property
def awg4_resume(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG4Resume)
@property
def awg1_trigger(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG1Trigger)
@property
def awg2_trigger(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG2Trigger)
@property
def awg3_trigger(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG3Trigger)
@property
def awg4_trigger(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG4Trigger)
@property
def awg1_jump_next_waveform(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG1JumpNextWaveform)
@property
def awg2_jump_next_waveform(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG2JumpNextWaveform)
@property
def awg3_jump_next_waveform(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG3JumpNextWaveform)
@property
def awg4_jump_next_waveform(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG4JumpNextWaveform)
@property
def awg1_queue_flush(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG1QueueFlush)
@property
def awg2_queue_flush(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG2QueueFlush)
@property
def awg3_queue_flush(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG3QueueFlush)
@property
def awg4_queue_flush(self):
return self.__module.getAction(constants.SD_AIO_Action_AWG4QueueFlush)
#*************DIG************
@property
def daq1_start(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ1Start)
@property
def daq2_start(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ2Start)
@property
def daq3_start(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ3Start)
@property
def daq4_start(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ4Start)
@property
def daq1_stop(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ1Stop)
@property
def daq2_stop(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ2Stop)
@property
def daq3_stop(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ3Stop)
@property
def daq4_stop(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ4Stop)
@property
def daq1_resume(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ1Resume)
@property
def daq2_resume(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ2Resume)
@property
def daq3_resume(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ3Resume)
@property
def daq4_resume(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ4Resume)
@property
def daq1_trigger(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ1Trigger)
@property
def daq2_trigger(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ2Trigger)
@property
def daq3_trigger(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ3Trigger)
@property
def daq4_trigger(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ4Trigger)
@property
def daq1_flush(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ1Flush)
@property
def daq2_flush(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ2Flush)
@property
def daq3_flush(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ3Flush)
@property
def daq4_flush(self):
return self.__module.getAction(constants.SD_AIO_Action_DAQ4Flush)
@property
def fpga_user_0(self):
return self.__module.getAction(constants.SD_AIO_Action_UserFpga0)
@property
def fpga_user_1(self):
return self.__module.getAction(constants.SD_AIO_Action_UserFpga1)
@property
def fpga_user_2(self):
return self.__module.getAction(constants.SD_AIO_Action_UserFpga2)
@property
def fpga_user_3(self):
return self.__module.getAction(constants.SD_AIO_Action_UserFpga3)
@property
def fpga_user_4(self):
return self.__module.getAction(constants.SD_AIO_Action_UserFpga4)
@property
def fpga_user_5(self):
return self.__module.getAction(constants.SD_AIO_Action_UserFpga5)
@property
def fpga_user_6(self):
return self.__module.getAction(constants.SD_AIO_Action_UserFpga6)
@property
def fpga_user_7(self):
return self.__module.getAction(constants.SD_AIO_Action_UserFpga7)
class EventAio:
def __init__(self, module):
self.__module = module
@property
def awg1_queue_empty(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1QueueEmpty)
@property
def awg2_queue_empty(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2QueueEmpty)
@property
def awg3_queue_empty(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3QueueEmpty)
@property
def awg4_queue_empty(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4QueueEmpty)
@property
def awg1_queue_full(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1QueueFull)
@property
def awg2_queue_full(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2QueueFull)
@property
def awg3_queue_full(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3QueueFull)
@property
def awg4_queue_full(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4QueueFull)
@property
def awg1_underrun(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1Underrun)
@property
def awg2_underrun(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2Underrun)
@property
def awg3_underrun(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3Underrun)
@property
def awg4_underrun(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4Underrun)
@property
def awg1_queue_end(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1QueueEnd)
@property
def awg2_queue_end(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2QueueEnd)
@property
def awg3_queue_end(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3QueueEnd)
@property
def awg4_queue_end(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4QueueEnd)
@property
def awg1_waveform_start(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1WfStart)
@property
def awg2_waveform_start(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2WfStart)
@property
def awg3_waveform_start(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3WfStart)
@property
def awg4_waveform_start(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4WfStart)
@property
def awg1_queue_marker(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1QueueMarker)
@property
def awg2_queue_marker(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2QueueMarker)
@property
def awg3_queue_marker(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3QueueMarker)
@property
def awg4_queue_marker(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4QueueMarker)
@property
def awg1_queue_flushed(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1QueueFlushed)
@property
def awg2_queue_flushed(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2QueueFlushed)
@property
def awg3_queue_flushed(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3QueueFlushed)
@property
def awg4_queue_flushed(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4QueueFlushed)
@property
def awg1_queue_running(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1QueueRunning)
@property
def awg2_queue_running(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2QueueRunning)
@property
def awg3_queue_running(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3QueueRunning)
@property
def awg4_queue_running(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4QueueRunning)
#***********DIG*************
@property
def daq1_empty(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ1Empty)
@property
def daq2_empty(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ2Empty)
@property
def daq3_empty(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ3Empty)
@property
def daq4_empty(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ4Empty)
@property
def daq1_running(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ1Running)
@property
def daq2_running(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ2Running)
@property
def daq3_running(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ3Running)
@property
def daq4_running(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ4Running)
@property
def daq1_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ1TriggerLoopback)
@property
def daq2_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ2TriggerLoopback)
@property
def daq3_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ3TriggerLoopback)
@property
def daq4_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIO_Event_DAQ4TriggerLoopback)
@property
def fpga_user_0(self):
return self.__module.getEvent(constants.SD_AIO_Event_UserFpgaLoopback0)
@property
def fpga_user_1(self):
return self.__module.getEvent(constants.SD_AIO_Event_UserFpgaLoopback1)
@property
def fpga_user_2(self):
return self.__module.getEvent(constants.SD_AIO_Event_UserFpgaLoopback2)
@property
def fpga_user_3(self):
return self.__module.getEvent(constants.SD_AIO_Event_UserFpgaLoopback3)
@property
def fpga_user_4(self):
return self.__module.getEvent(constants.SD_AIO_Event_UserFpgaLoopback4)
@property
def fpga_user_5(self):
return self.__module.getEvent(constants.SD_AIO_Event_UserFpgaLoopback5)
@property
def fpga_user_6(self):
return self.__module.getEvent(constants.SD_AIO_Event_UserFpgaLoopback6)
@property
def fpga_user_7(self):
return self.__module.getEvent(constants.SD_AIO_Event_UserFpgaLoopback7)
@property
def awg1_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG1TriggerLoopback)
@property
def awg2_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG2TriggerLoopback)
@property
def awg3_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG3TriggerLoopback)
@property
def awg4_trigger_loopback(self):
return self.__module.getEvent(constants.SD_AIO_Event_AWG4TriggerLoopback)
class InstructionAIO(InstructionDIG, InstructionAWG):
def __init__(self,module):
self.__module = module
InstructionDIG.__init__(self,module)
InstructionAWG.__init__(self,module)
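# Sketch (illustrative): InstructionAIO merges the digitizer and AWG
# instruction trees by calling both parent initializers, so one object exposes
# both attribute families. Only the InstructionDIG side is shown here, since
# InstructionAWG is defined elsewhere in this file.
def _example_aio_instructions(module):
    instr = InstructionAIO(module)
    return instr.daq_config.id, instr.channel_trigger_config.id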
class SD_AIOHvi:
def __init__(self,module):
self.__module = module
self.__engines = Engine(module)
self.__actions = ActionAio(module)
self.__events = EventAio(module)
self.__instructions = InstructionAIO(module)
self.__triggers = TriggerAIO()
@property
def engines(self):
return self.__engines
@property
def actions(self):
return self.__actions
@property
def events(self):
return self.__events
@property
def instructions(self):
return self.__instructions
@property
def triggers(self):
return self.__triggers
class SD_AIO(SD_AIN,SD_AOU) :
def __init__(self):
super(SD_AIO, self).__init__()
self.__hvi = None
def createHvi(self):
self.__hvi = SD_AIOHvi(self)
@property
def hvi(self):
return self.__hvi
def getAttributeId64(self, attributeId):
if self._SD_Object__handle > 0 :
id = c_longlong(0);
self._SD_Object__core_dll.SD_Module_Hvi_getAttributeId64(self._SD_Object__handle, attributeId, byref(id))
return id.value
else :
return SD_Error.MODULE_NOT_OPENED
def getAction(self, actionId):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_getAction(self._SD_Object__handle, actionId)
else :
return SD_Error.MODULE_NOT_OPENED
def getEvent(self, eventId):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_getEvent(self._SD_Object__handle, eventId)
else :
return SD_Error.MODULE_NOT_OPENED
def clockIOconfig(self,port,clockConfig):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_clockIOconfig(self._SD_Object__handle, port, clockConfig)
else :
return SD_Error.MODULE_NOT_OPENED
def clockGetFrequency(self,port):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_clockGetFrequency(self._SD_Object__handle, port)
else :
return SD_Error.MODULE_NOT_OPENED
def clockGetSyncFrequency(self, port):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_clockGetSyncFrequency(self._SD_Object__handle, port)
else :
return SD_Error.MODULE_NOT_OPENED
def clockSetFrequency(self, port, frequency, mode = 1):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_clockSetFrequency(self._SD_Object__handle, port, c_double(frequency), mode)
else :
return SD_Error.MODULE_NOT_OPENED
def clockResetPhase(self, port, triggerBehavior, PXItrigger, skew = 0):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_clockResetPhase(self._SD_Object__handle, port, triggerBehavior, PXItrigger, c_double(skew))
else :
return SD_Error.MODULE_NOT_OPENED
def triggerIOconfig(self, port, direction):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_triggerIOconfig(self._SD_Object__handle, port, direction)
else :
return SD_Error.MODULE_NOT_OPENED
def triggerIOwrite(self, port, value, syncMode):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_triggerIOwrite(self._SD_Object__handle, port, value, syncMode)
else :
return SD_Error.MODULE_NOT_OPENED
def triggerIOread(self, port):
if self._SD_Object__handle > 0 :
return self._SD_Object__core_dll.SD_AIO_triggerIOread(self._SD_Object__handle, port)
else :
return SD_Error.MODULE_NOT_OPENED
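# --- Usage sketch (not part of the driver) ----------------------------------
# SD_AIO overrides the clock/trigger IO calls with an extra 'port' argument,
# since combined AWG/digitizer modules expose two front ends. The direction
# and sync values below are illustrative.
def _example_aio_trigger_roundtrip(aio, port=0):
    err = aio.triggerIOconfig(port, 1)       # configure the port's trigger line
    if err < 0:
        return err
    aio.triggerIOwrite(port, 1, 1)           # drive the line high, syncMode=1
    return aio.triggerIOread(port)           # read the line state back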
# ---- File: Codewars/Ones_and_zeros - (7 kyu).py (repo: maxcohen31/A-bored-math-student, license: MIT) ----
def binary_array_to_number(arr):
return int(''.join([str(i) for i in arr]), 2)
b = [0, 1, 0, 1]
print(binary_array_to_number(b))
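# An equivalent bit-shifting variant (added for illustration): it avoids the
# string round-trip by folding each bit into an accumulator.
def binary_array_to_number_shift(arr):
    n = 0
    for bit in arr:
        n = (n << 1) | bit
    return n

print(binary_array_to_number_shift(b))  # same result: 5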
# ---- File: migrations/versions/c4462f0b54c9_add_collection_value_field.py (repo: xorg/mtg-dashboard, license: MIT) ----
"""add collection value field
Revision ID: c4462f0b54c9
Revises: 35c42c42d58b
Create Date: 2021-06-28 16:40:34.746137
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c4462f0b54c9'
down_revision = '35c42c42d58b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('card', schema=None) as batch_op:
batch_op.alter_column('id',
existing_type=sa.INTEGER(),
nullable=False,
autoincrement=True)
batch_op.alter_column('name',
existing_type=sa.TEXT(),
nullable=False)
batch_op.alter_column('count',
existing_type=sa.INTEGER(),
nullable=False)
with op.batch_alter_table('collection', schema=None) as batch_op:
batch_op.add_column(sa.Column('value', sa.Float(), nullable=True))
batch_op.alter_column('id',
existing_type=sa.INTEGER(),
nullable=False,
autoincrement=True)
batch_op.alter_column('name',
existing_type=sa.TEXT(),
nullable=False)
with op.batch_alter_table('collection_card_rel', schema=None) as batch_op:
batch_op.alter_column('collection_id',
existing_type=sa.INTEGER(),
nullable=False)
batch_op.alter_column('card_id',
existing_type=sa.INTEGER(),
nullable=False)
batch_op.drop_index('idx_collection_card_rel_card_id')
batch_op.drop_index('idx_collection_card_rel_collection_id')
with op.batch_alter_table('price', schema=None) as batch_op:
batch_op.alter_column('id',
existing_type=sa.INTEGER(),
nullable=False,
autoincrement=True)
batch_op.alter_column('card_id',
existing_type=sa.INTEGER(),
nullable=False)
batch_op.alter_column('date',
existing_type=sa.TEXT(),
nullable=False)
batch_op.drop_index('idx_price_card_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('price', schema=None) as batch_op:
batch_op.create_index('idx_price_card_id', ['card_id'], unique=False)
batch_op.alter_column('date',
existing_type=sa.TEXT(),
nullable=True)
batch_op.alter_column('card_id',
existing_type=sa.INTEGER(),
nullable=True)
batch_op.alter_column('id',
existing_type=sa.INTEGER(),
nullable=True,
autoincrement=True)
with op.batch_alter_table('collection_card_rel', schema=None) as batch_op:
batch_op.create_index('idx_collection_card_rel_collection_id', ['collection_id'], unique=False)
batch_op.create_index('idx_collection_card_rel_card_id', ['card_id'], unique=False)
batch_op.alter_column('card_id',
existing_type=sa.INTEGER(),
nullable=True)
batch_op.alter_column('collection_id',
existing_type=sa.INTEGER(),
nullable=True)
with op.batch_alter_table('collection', schema=None) as batch_op:
batch_op.alter_column('name',
existing_type=sa.TEXT(),
nullable=True)
batch_op.alter_column('id',
existing_type=sa.INTEGER(),
nullable=True,
autoincrement=True)
batch_op.drop_column('value')
with op.batch_alter_table('card', schema=None) as batch_op:
batch_op.alter_column('count',
existing_type=sa.INTEGER(),
nullable=True)
batch_op.alter_column('name',
existing_type=sa.TEXT(),
nullable=True)
batch_op.alter_column('id',
existing_type=sa.INTEGER(),
nullable=True,
autoincrement=True)
# ### end Alembic commands ###
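# Usage note (not part of the migration): with Alembic configured for this
# project (or via `flask db ...` if Flask-Migrate drives it), this revision
# would typically be applied or rolled back from the command line:
#
#   alembic upgrade c4462f0b54c9    # runs upgrade() up to this revision
#   alembic downgrade 35c42c42d58b  # runs downgrade() back to the parent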
# ---- File: tools/mo/unit_tests/mo/front/mxnet/arange_like_test.py (repo: ryanloney/openvino-1, license: Apache-2.0) ----
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.front.mxnet.arange_like_replacer import ArangeLikeReplacer
from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph, shaped_parameter, regular_op_with_empty_data, result, connect, \
shaped_const_with_data, connect_data
class ArangeLikeReplacerTest(unittest.TestCase):
def test_axis_not_none_start_0(self):
graph = build_graph(
nodes_attrs={
**shaped_parameter('input', int64_array([1, 3, 5, 5])),
**regular_op_with_empty_data('arange_like', {'op': 'arange_like', 'type': None, 'axis': 3, 'repeat': 1,
'start': 0, 'step': 1}),
**result('result')
},
edges=[
*connect('input', 'arange_like'),
*connect('arange_like', 'result')
]
)
ref_graph = build_graph(
nodes_attrs={
**shaped_parameter('input', int64_array([1, 3, 5, 5])),
**regular_op_with_empty_data('shape_of', {'op': 'ShapeOf', 'type': 'ShapeOf'}),
**shaped_const_with_data('gather_axis', None),
**shaped_const_with_data('gather_indices', None),
**regular_op_with_empty_data('gather', {'op': 'Gather', 'type': 'Gather'}),
**shaped_const_with_data('range_start', None),
**shaped_const_with_data('range_step', None),
**shaped_const_with_data('squeeze_const', None),
**regular_op_with_empty_data('squeeze', {'op': 'Squeeze', 'type': 'Squeeze'}),
**regular_op_with_empty_data('range', {'op': 'Range', 'type': 'Range'}),
**result('result')
},
edges=[
*connect('input', 'shape_of'),
*connect('shape_of', '0:gather'),
*connect('gather_axis', '1:gather'),
*connect('gather_indices', '2:gather'),
*connect('range_start', '0:range'),
*connect('gather', '0:squeeze'),
*connect('squeeze_const', '1:squeeze'),
*connect('squeeze', '1:range'),
*connect('range_step', '2:range'),
*connect('range', 'result')
],
update_attributes={
'gather_axis': {'value': 3},
'gather_indices': {'value': 0},
'range_start': {'value': 0},
'range_step': {'value': 1}
}
)
ArangeLikeReplacer().find_and_replace_pattern(graph)
flag, resp = compare_graphs(graph, ref_graph, 'result', 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_axis_not_none_start_1_step_2(self):
graph = build_graph(
nodes_attrs={
**shaped_parameter('input', int64_array([1, 3, 5, 5])),
**regular_op_with_empty_data('arange_like', {'op': 'arange_like', 'type': None, 'axis': 3, 'repeat': 1,
'start': 1, 'step': 2}),
**result('result')
},
edges=[
*connect('input', 'arange_like'),
*connect('arange_like', 'result')
]
)
ref_graph = build_graph(
nodes_attrs={
**shaped_parameter('input', int64_array([1, 3, 5, 5])),
**regular_op_with_empty_data('shape_of', {'op': 'ShapeOf', 'type': 'ShapeOf'}),
**shaped_const_with_data('gather_axis', None),
**shaped_const_with_data('gather_indices', None),
**regular_op_with_empty_data('gather', {'op': 'Gather', 'type': 'Gather'}),
**regular_op_with_empty_data('mul', {'op': 'Mul', 'type': 'Multiply'}),
**shaped_const_with_data('mul_const', None),
**shaped_const_with_data('range_start', None),
**shaped_const_with_data('range_step', None),
**shaped_const_with_data('add_const', None),
**regular_op_with_empty_data('add', {'op': 'Add', 'type': 'Add'}),
**shaped_const_with_data('squeeze_const', None),
**regular_op_with_empty_data('squeeze', {'op': 'Squeeze', 'type': 'Squeeze'}),
**regular_op_with_empty_data('range', {'op': 'Range', 'type': 'Range'}),
**regular_op_with_empty_data('slice', {'op': 'Slice', 'type': None}),
**shaped_const_with_data('slice_start', None),
**shaped_const_with_data('slice_axes', None),
**shaped_const_with_data('slice_step', None),
**result('result')
},
edges=[
*connect('input', 'shape_of'),
*connect('shape_of', '0:gather'),
*connect('gather_axis', '1:gather'),
*connect('gather_indices', '2:gather'),
*connect('range_start', '0:range'),
*connect('gather', '0:mul'),
*connect('mul_const', '1:mul'),
*connect('mul', '0:add'),
*connect('add_const', '1:add'),
*connect('squeeze_const', '1:squeeze'),
*connect('add', '0:squeeze'),
*connect('squeeze', '1:range'),
*connect('range_step', '2:range'),
*connect('range', '0:slice'),
*connect('slice_start', '1:slice'),
*connect_data('gather', '2:slice'),
*connect('slice_axes', '3:slice'),
*connect('slice_step', '4:slice'),
*connect('slice', 'result')
],
update_attributes={
'gather_axis': {'value': 3},
'gather_indices': {'value': 0},
'range_start': {'value': 1},
'range_step': {'value': 2},
'add_const': {'value': 1},
'mul_const': {'value': 2},
'slice_start': {'value': int64_array([0])},
'slice_axes': {'value': int64_array([0])},
'slice_step': {'value': int64_array([1])},
}
)
ArangeLikeReplacer().find_and_replace_pattern(graph)
flag, resp = compare_graphs(graph, ref_graph, 'result', 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_axis_none_start_0(self):
graph = build_graph(
nodes_attrs={
**shaped_parameter('input', int64_array([1, 3, 5, 5])),
**regular_op_with_empty_data('arange_like', {'op': 'arange_like', 'type': None, 'axis': None,
'repeat': 1, 'start': 0, 'step': 1}),
**result('result')
},
edges=[
*connect('input', 'arange_like'),
*connect('arange_like', 'result')
]
)
ref_graph = build_graph(
nodes_attrs={
**shaped_parameter('input', int64_array([1, 3, 5, 5])),
**regular_op_with_empty_data('shape_of', {'op': 'ShapeOf', 'type': 'ShapeOf'}),
**regular_op_with_empty_data('reduce_prod', {'op': 'ReduceProd', 'type': 'ReduceProd'}),
**shaped_const_with_data('reduce_prod_const', None),
**shaped_const_with_data('squeeze_const', None),
**regular_op_with_empty_data('squeeze', {'op': 'Squeeze', 'type': 'Squeeze'}),
**shaped_const_with_data('range_start', None),
**shaped_const_with_data('range_step', None),
**regular_op_with_empty_data('range', {'op': 'Range', 'type': 'Range'}),
**regular_op_with_empty_data('reshape_backward', {'op': 'Reshape', 'type': 'Reshape'}),
**result('result')
},
edges=[
*connect('input', 'shape_of'),
*connect('shape_of', '0:reduce_prod'),
*connect('reduce_prod_const', '1:reduce_prod'),
*connect('squeeze_const', '1:squeeze'),
*connect('reduce_prod', '0:squeeze'),
*connect('range_start', '0:range'),
*connect('range_step', '2:range'),
*connect('squeeze', '1:range'),
*connect('range', '0:reshape_backward'),
*connect_data('shape_of', '1:reshape_backward'),
*connect('reshape_backward', 'result')
],
update_attributes={
'range_start': {'value': 0},
'range_step': {'value': 1},
'reduce_prod_const': {'value': int64_array([0])}
}
)
ArangeLikeReplacer().find_and_replace_pattern(graph)
flag, resp = compare_graphs(graph, ref_graph, 'result', 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_axis_none_start_1(self):
graph = build_graph(
nodes_attrs={
**shaped_parameter('input', int64_array([1, 3, 5, 5])),
**regular_op_with_empty_data('arange_like', {'op': 'arange_like', 'type': None, 'axis': None,
'repeat': 1, 'start': 1, 'step': 1}),
**result('result')
},
edges=[
*connect('input', 'arange_like'),
*connect('arange_like', 'result')
]
)
ref_graph = build_graph(
nodes_attrs={
**shaped_parameter('input', int64_array([1, 3, 5, 5])),
**regular_op_with_empty_data('shape_of', {'op': 'ShapeOf', 'type': 'ShapeOf'}),
**regular_op_with_empty_data('reduce_prod', {'op': 'ReduceProd', 'type': 'ReduceProd'}),
**shaped_const_with_data('reduce_prod_const', None),
**shaped_const_with_data('squeeze_const', None),
**regular_op_with_empty_data('squeeze', {'op': 'Squeeze', 'type': 'Squeeze'}),
**shaped_const_with_data('add_const', None),
**regular_op_with_empty_data('add', {'op': 'Add', 'type': 'Add'}),
**shaped_const_with_data('range_start', None),
**shaped_const_with_data('range_step', None),
**regular_op_with_empty_data('range', {'op': 'Range', 'type': 'Range'}),
**regular_op_with_empty_data('reshape_backward', {'op': 'Reshape', 'type': 'Reshape'}),
**result('result')
},
edges=[
*connect('input', 'shape_of'),
*connect('shape_of', '0:reduce_prod'),
*connect('reduce_prod_const', '1:reduce_prod'),
*connect('squeeze_const', '1:squeeze'),
*connect('add_const', '1:add'),
*connect('reduce_prod', '0:add'),
*connect('add', '0:squeeze'),
*connect('range_start', '0:range'),
*connect('range_step', '2:range'),
*connect('squeeze', '1:range'),
*connect('range', '0:reshape_backward'),
*connect_data('shape_of', '1:reshape_backward'),
*connect('reshape_backward', 'result')
],
update_attributes={
'range_start': {'value': 1},
'range_step': {'value': 1},
'add_const': {'value': 1},
'reduce_prod_const': {'value': int64_array([0])}
}
)
ArangeLikeReplacer().find_and_replace_pattern(graph)
flag, resp = compare_graphs(graph, ref_graph, 'result', 'result', check_op_attrs=True)
self.assertTrue(flag, resp)
| 50.146444
| 119
| 0.507384
| 1,205
| 11,985
| 4.706224
| 0.082158
| 0.04285
| 0.061894
| 0.085699
| 0.878152
| 0.851525
| 0.819961
| 0.807618
| 0.803209
| 0.803209
| 0
| 0.019109
| 0.327576
| 11,985
| 238
| 120
| 50.357143
| 0.684576
| 0.006425
| 0
| 0.721739
| 0
| 0
| 0.214448
| 0
| 0
| 0
| 0
| 0
| 0.017391
| 1
| 0.017391
| false
| 0
| 0.021739
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c97e2ce481828e9206665bd19591dae7330adc32
| 6,364
|
py
|
Python
|
convert3d_niftimultiorgan.py
|
tangy5/abdomenSeg2D
|
5af1f2dcfa30d3bc864db6931c533be6650d88a4
|
[
"BSD-3-Clause"
] | null | null | null |
convert3d_niftimultiorgan.py
|
tangy5/abdomenSeg2D
|
5af1f2dcfa30d3bc864db6931c533be6650d88a4
|
[
"BSD-3-Clause"
] | null | null | null |
convert3d_niftimultiorgan.py
|
tangy5/abdomenSeg2D
|
5af1f2dcfa30d3bc864db6931c533be6650d88a4
|
[
"BSD-3-Clause"
] | null | null | null |
import torch.utils.data as data
import os
import random
import glob
from PIL import Image
import numpy as np
import nibabel as nb
image_dir = ''
gt_dir = ''
label2d_dir = ''
output_dir = ''
result2d = ''
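# NOTE: the five path variables above are placeholders; set them to the 3D
# image directory, ground-truth directory, 2D-label directory, output
# directory, and 2D-result root before running this script.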
for i in range(10, 51):
label2d_dir = os.path.join(result2d, 'val_{}'.format(i))
output_dir = os.path.join(result2d, 'result_{}'.format(i))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
count = 0
imagename2file = {}
for img in os.listdir(label2d_dir):
count += 1
image_name = img.split('_')[0]
image_idx = int(img.split('_')[1].split('.png')[0])
image_path = os.path.join(label2d_dir, img)
if image_name not in imagename2file:
imagename2file[image_name] = {}
imagename2file[image_name][image_idx] = image_path
count = 0
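    # Rebuild each 3D NIfTI volume by stacking its per-slice 2D PNG
    # predictions; rotate(-90) presumably undoes a rotation applied when the
    # slices were exported.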
for item in imagename2file:
image_file = os.path.join(image_dir, item)
imgnb = nb.load(image_file)
label_np = np.zeros((imgnb.shape[0], imgnb.shape[1], imgnb.shape[2]))
for i in range(label_np.shape[2]):
labelslice_file = imagename2file[item][i]
labelslice_image = Image.open(labelslice_file).rotate(-90)
labelslice_np = np.array(labelslice_image)
label_np[:,:,i] = labelslice_np
label_nb = nb.Nifti1Image(label_np, imgnb.affine)
label_file = os.path.join(output_dir, item)
nb.save(label_nb, label_file)
count += 1
print('[{}] converted {}'.format(count, item))
def dice(nparray, gtarray):
    # Dice similarity coefficient; the 1e-9 term guards against division by zero.
    return np.sum(nparray[gtarray == 1]) * 2.0 / (np.sum(nparray) + np.sum(gtarray) + 1e-9)
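# Hypothetical sanity check: Dice of a mask with itself is ~1.0, of disjoint
# masks ~0.0, e.g. a = np.ones((4, 4)); dice(a, a) -> ~1.0; dice(a, 1 - a) -> ~0.0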
for i in range(10, 51):
count = 0
output_dir = os.path.join(result2d, 'result_{}'.format(i))
dice_file = os.path.join(output_dir, 'dice_result_{}.txt'.format(i))
dice_wr = open(dice_file, 'w')
average_dice = 0
average_count = 0
organ_dice_list = [0] * 13
organ_dice_count = [0] * 13
for img in os.listdir(output_dir):
if img.endswith('.nii.gz'):
count += 1
label_path = os.path.join(output_dir, img)
# if os.path.isfile(image_path) and os.path.isfile(seg_file):
labelnb = nb.load(label_path)
labelnp = np.array(labelnb.dataobj)
gt_path = os.path.join(gt_dir, img)
gtnb = nb.load(gt_path)
gtnp = np.array(gtnb.dataobj)
DSC_list = []
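            # Labels 1..13 are the organ classes; per-organ Dice is computed
            # against the ground truth. Only scores above 0.01 increment the
            # denominators, so near-zero (likely absent) organs are excluded
            # from the reported means.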
for i in range(1, 14):
idx = np.where(labelnp == i)
organ_np = np.zeros((labelnp.shape[0], labelnp.shape[1], labelnp.shape[2]))
organ_np[idx] = 1
idx = np.where(gtnp == i)
gt_organ = np.zeros((gtnp.shape[0], gtnp.shape[1], gtnp.shape[2]))
gt_organ[idx] = 1
organ_DSC = dice(organ_np, gt_organ)
DSC_list.append(organ_DSC)
average_dice += organ_DSC
organ_dice_list[i-1] += organ_DSC
if organ_DSC > 0.01:
average_count += 1
organ_dice_count[i-1] += 1
for j, item in enumerate(DSC_list):
if j == len(DSC_list)-1:
dice_wr.write(str(item) + '\n')
else:
dice_wr.write(str(item) + ' ')
print('[{}] -- {} processed'.format(count, img))
average_organ_dice = []
    for i in range(13):
        # organ_dice_count[i] can be zero when an organ never exceeded
        # DSC > 0.01; guard against ZeroDivisionError.
        mean_organ = organ_dice_list[i] / max(organ_dice_count[i], 1)
        average_organ_dice.append(mean_organ)
        dice_wr.write(str(mean_organ) + ' ')
dice_wr.write(str(average_dice/average_count))
print(average_dice/average_count)
print(average_organ_dice)
dice_wr.close()
# ---- single-run variant: repeats the conversion + Dice evaluation above once,
# reusing whatever label2d_dir/output_dir were last set (no val_{i} loop) ----
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
count = 0
imagename2file = {}
for img in os.listdir(label2d_dir):
count += 1
image_name = img.split('.nii.gz')[0] + '.nii.gz'
image_idx = int(img.split('.nii.gz_')[1].split('.png')[0])
image_path = os.path.join(label2d_dir, img)
if image_name not in imagename2file:
imagename2file[image_name] = {}
imagename2file[image_name][image_idx] = image_path
count = 0
for item in imagename2file:
image_file = os.path.join(image_dir, item)
imgnb = nb.load(image_file)
label_np = np.zeros((imgnb.shape[0], imgnb.shape[1], imgnb.shape[2]))
for i in range(label_np.shape[2]):
labelslice_file = imagename2file[item][i]
labelslice_image = Image.open(labelslice_file).rotate(-90)
labelslice_np = np.array(labelslice_image)
label_np[:,:,i] = labelslice_np
label_nb = nb.Nifti1Image(label_np, imgnb.affine)
label_file = os.path.join(output_dir, item)
nb.save(label_nb, label_file)
count += 1
print('[{}] converted {}'.format(count, item))
def dice(nparray, gtarray):
    # Same Dice coefficient as above, redefined verbatim in the single-run variant.
    return np.sum(nparray[gtarray == 1]) * 2.0 / (np.sum(nparray) + np.sum(gtarray) + 1e-9)
count = 0
# output_dir = os.path.join(result2d, 'result_{}'.format(i))
dice_file = os.path.join(output_dir, 'dice_result_{}.txt'.format(i))
dice_wr = open(dice_file, 'w')
average_dice = 0
average_count = 0
organ_dice_list = [0] * 13
organ_dice_count = [0] * 13
for img in os.listdir(output_dir):
if img.endswith('.nii.gz'):
count += 1
label_path = os.path.join(output_dir, img)
# if os.path.isfile(image_path) and os.path.isfile(seg_file):
labelnb = nb.load(label_path)
labelnp = np.array(labelnb.dataobj)
gt_path = os.path.join(gt_dir, img)
gtnb = nb.load(gt_path)
gtnp = np.array(gtnb.dataobj)
DSC_list = []
for i in range(1, 14):
idx = np.where(labelnp == i)
organ_np = np.zeros((labelnp.shape[0], labelnp.shape[1], labelnp.shape[2]))
organ_np[idx] = 1
idx = np.where(gtnp == i)
gt_organ = np.zeros((gtnp.shape[0], gtnp.shape[1], gtnp.shape[2]))
gt_organ[idx] = 1
organ_DSC = dice(organ_np, gt_organ)
DSC_list.append(organ_DSC)
average_dice += organ_DSC
organ_dice_list[i-1] += organ_DSC
if organ_DSC > 0.01:
average_count += 1
organ_dice_count[i-1] += 1
for j, item in enumerate(DSC_list):
if j == len(DSC_list)-1:
dice_wr.write(str(item) + '\n')
else:
dice_wr.write(str(item) + ' ')
print('[{}] -- {} processed'.format(count, img))
average_organ_dice = []
for i in range(13):
    # Same ZeroDivisionError guard as in the loop variant above.
    mean_organ = organ_dice_list[i] / max(organ_dice_count[i], 1)
    average_organ_dice.append(mean_organ)
    dice_wr.write(str(mean_organ) + ' ')
dice_wr.write(str(average_dice/average_count))
print(average_dice/average_count)
print(average_organ_dice)
dice_wr.close()
| 30.449761
| 91
| 0.642678
| 974
| 6,364
| 3.988706
| 0.106776
| 0.033977
| 0.041184
| 0.022651
| 0.952124
| 0.936937
| 0.929215
| 0.929215
| 0.929215
| 0.918919
| 0
| 0.029178
| 0.20836
| 6,364
| 209
| 92
| 30.449761
| 0.741961
| 0.028913
| 0
| 0.89697
| 0
| 0
| 0.030764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012121
| false
| 0
| 0.042424
| 0.012121
| 0.066667
| 0.048485
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3566e814223667ba5cc9e35da3f1fb3ed0bdc61
| 52,314
|
py
|
Python
|
tests/unit_tests/test_mdp_files.py
|
MauriceKarrenbrock/FS-NEW_gromacs
|
8d8d815391a674eee109a2f21cfc1493395b73a3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit_tests/test_mdp_files.py
|
MauriceKarrenbrock/FS-NEW_gromacs
|
8d8d815391a674eee109a2f21cfc1493395b73a3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit_tests/test_mdp_files.py
|
MauriceKarrenbrock/FS-NEW_gromacs
|
8d8d815391a674eee109a2f21cfc1493395b73a3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=no-self-use
# pylint: disable=protected-access
# pylint: disable=duplicate-code
# pylint: disable=too-many-lines
#############################################################
# Copyright (c) 2020-2020 Maurice Karrenbrock #
# #
# This software is open-source and is distributed under the #
# BSD 3-Clause "New" or "Revised" License #
#############################################################
import pytest
import FSDAMGromacs.mdp_files as mdp_files
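# These tests cover the FSDAMGromacs mdp-file generators: the free-energy
# control lines, the COM-COM pulling strings, and the MdpFile class hierarchy
# (bound/unbound annihilation and creation variants). Note that the spelling
# 'COM_pull_goups' (sic) matches the library's actual keyword argument.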
class Testmake_free_energy_lines():
def test_works(self):
expected = [
'; Free energy control stuff', 'free-energy = yes',
'init-lambda = 1', 'delta-lambda = -0.1',
'couple-moltype = ALC', 'couple-lambda0 =vdw',
'couple-lambda1 =none', 'couple-intramol =no',
'sc-alpha = 0.5', 'sc-coul = no',
'sc-sigma = 0.25', 'sc-power = 1',
'nstdhdl = 100', 'separate-dhdl-file = yes',
''
]
output = mdp_files.make_free_energy_lines(condition_lambda0='vdw',
condition_lambda1='none',
alchemical_molecule='ALC',
lambda_step=-0.1,
starting_lambda=1,
couple_intramol='no',
sc_alpha=0.5,
sc_coul='no',
sc_sigma=0.25,
sc_power=1,
nstdhdl=100,
separate_dhdl_file='yes',
free_energy='yes')
assert output == expected
class Testcreate_COMCOM_pulling_strings():
def test_works_pbc_none(self):
COM_pull_goups = ['DU1', 'Protein', 'DU2']
harmonic_kappa = [['DU1', 'Protein', 120], ['DU1', 'DU2', 121],
['Protein', 'DU2', 0]]
pbc = None
output = mdp_files.create_COMCOM_pulling_strings(
COM_pull_goups=COM_pull_goups,
harmonic_kappa=harmonic_kappa,
pbc_atoms=pbc)
expected_output = [
';COM PULLING', 'pull = yes',
'pull-print-com = yes', 'pull-print-components = no',
f'pull-ncoords = {len(COM_pull_goups)-1}',
'pull-nstxout = 10',
f'pull-ngroups = {len(COM_pull_goups)}',
f'pull-group1-name = {COM_pull_goups[0]}\n' +
f'pull-group2-name = {COM_pull_goups[1]}\n' +
f'pull-group3-name = {COM_pull_goups[2]}\n',
'pull-pbc-ref-prev-step-com = yes',
'pull-coord1-geometry = distance',
'pull-coord1-type = umbrella',
'pull-coord1-dim = Y Y Y', 'pull-coord1-groups = 1 2',
'pull-coord1-start = yes', 'pull-coord1-init = 0.0',
'pull-coord1-rate = 0', 'pull-coord1-k = 120',
'pull-coord2-geometry = distance',
'pull-coord2-type = umbrella',
'pull-coord2-dim = Y Y Y', 'pull-coord2-groups = 1 3',
'pull-coord2-start = yes', 'pull-coord2-init = 0.0',
'pull-coord2-rate = 0', 'pull-coord2-k = 121'
]
assert output == expected_output
def test_works_given_pbc(self):
COM_pull_goups = ['DU1', 'Protein', 'DU2']
harmonic_kappa = [['DU1', 'Protein', 120], ['DU1', 'DU2', 121],
['Protein', 'DU2', 0]]
pbc = (124, 0, 0)
output = mdp_files.create_COMCOM_pulling_strings(
COM_pull_goups=COM_pull_goups,
harmonic_kappa=harmonic_kappa,
pbc_atoms=pbc)
expected_output = [
';COM PULLING',
'pull = yes',
'pull-print-com = yes',
'pull-print-components = no',
f'pull-ncoords = {len(COM_pull_goups)-1}',
'pull-nstxout = 10',
f'pull-ngroups = {len(COM_pull_goups)}',
f'pull-group1-name = {COM_pull_goups[0]}\n' +
f'pull-group2-name = {COM_pull_goups[1]}\n' +
f'pull-group3-name = {COM_pull_goups[2]}\n',
'pull-pbc-ref-prev-step-com = yes',
'pull-coord1-geometry = distance',
'pull-coord1-type = umbrella',
'pull-coord1-dim = Y Y Y',
'pull-coord1-groups = 1 2',
'pull-coord1-start = yes',
'pull-coord1-init = 0.0',
'pull-coord1-rate = 0',
'pull-coord1-k = 120',
'pull-group1-pbcatom = 124',
'pull-coord2-geometry = distance',
'pull-coord2-type = umbrella',
'pull-coord2-dim = Y Y Y',
'pull-coord2-groups = 1 3',
'pull-coord2-start = yes',
'pull-coord2-init = 0.0',
'pull-coord2-rate = 0',
'pull-coord2-k = 121',
'pull-group2-pbcatom = 0',
'pull-group3-pbcatom = 0',
]
assert output == expected_output
class TestMdpFile():
def test_init(self):
mdp_file = 'mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = ['a', 'b', 'c']
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups)
output = [
instance.mdp_file, instance.alchemical_molecule,
instance.timestep_ps, instance.number_of_steps,
instance.temperature, instance.lambda_steps,
instance.COM_pull_goups, instance._template
]
expected = [
mdp_file + '.mdp', alchemical_molecule, timestep_ps,
number_of_steps, temperature, lambda_steps, COM_pull_goups, []
]
assert output == expected
def test__create_free_energy_strings(self, mocker):
mdp_file = 'mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups)
m_free = mocker.patch('FSDAMGromacs.mdp_files.make_free_energy_lines',
return_value=['A', 'B'])
output = instance._create_free_energy_strings(condition_lambda0='vdw',
condition_lambda1='none',
starting_lambda=1,
couple_intramol='no',
sc_alpha=0.0,
sc_coul='no',
sc_sigma=0.25,
sc_power=1,
nstdhdl=100,
separate_dhdl_file='yes',
free_energy='yes')
assert output == ['A', 'B']
m_free.assert_called_once_with(condition_lambda0='vdw',
condition_lambda1='none',
alchemical_molecule='alc',
lambda_step=None,
starting_lambda=1,
couple_intramol='no',
sc_alpha=0.0,
sc_coul='no',
sc_sigma=0.25,
sc_power=1,
nstdhdl=100,
separate_dhdl_file='yes',
free_energy='yes')
def test__create_COMCOM_pulling_strings_None(self):
mdp_file = 'mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups)
assert instance._create_COMCOM_pulling_strings() == ['']
def test__create_COMCOM_pulling_strings_empty_list(self):
mdp_file = 'mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = []
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups)
assert instance._create_COMCOM_pulling_strings() == ['']
def test__create_COMCOM_pulling_strings_works(self, mocker):
mdp_file = 'mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = ['DU1', 'Protein', 'DU2']
harmonic_kappa = [['DU1', 'Protein', 120], ['DU1', 'DU2', 121],
['Protein', 'DU2', 0]]
m_COM = mocker.patch(
'FSDAMGromacs.mdp_files.create_COMCOM_pulling_strings',
return_value=['A', 'B'])
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups,
harmonic_kappa)
assert instance._create_COMCOM_pulling_strings() == ['A', 'B']
m_COM.assert_called_once_with(COM_pull_goups=COM_pull_goups,
harmonic_kappa=harmonic_kappa,
pbc_atoms=None)
@pytest.mark.parametrize('test_type, harmonic_kappa',
[('harmonic_kappa None', None),
('harmonic_kappa []', [])])
def test__create_COMCOM_pulling_strings_raises_valueerror(
self, test_type, harmonic_kappa):
print('Logging test type for visibility: ' + test_type)
mdp_file = 'mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = ['DU1', 'Protein', 'DU2']
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups,
harmonic_kappa)
with pytest.raises(ValueError):
instance._create_COMCOM_pulling_strings()
def test_get_template(self):
mdp_file = 'mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = ['DU1', 'Protein', 'DU2']
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups)
instance._template = ['AAAAA', 'BBBBB\n', 'ccccc']
expected = ['AAAAA\n', 'BBBBB\n', 'ccccc\n']
assert instance._get_template() == expected
def test_write_template(self, mocker):
mocked_write = mocker.patch(
'PythonAuxiliaryFunctions.files_IO.write_file.write_file')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = ['DU1', 'Protein', 'DU2']
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups)
instance._write_template(['template'])
mocked_write.assert_called_once_with(['template'], mdp_file)
def test_execute(self, mocker):
m_hook = mocker.patch.object(mdp_files.MdpFile, '_hook')
m_get = mocker.patch.object(mdp_files.MdpFile, '_get_template')
m_write = mocker.patch.object(mdp_files.MdpFile, '_write_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = ['DU1', 'Protein', 'DU2']
instance = mdp_files.MdpFile(mdp_file, alchemical_molecule,
timestep_ps, number_of_steps, temperature,
lambda_steps, COM_pull_goups)
instance.execute()
m_hook.assert_called_once()
m_get.assert_called_once()
m_write.assert_called_once()
class TestAnnihilateVdwMdpBoundState():
def test__create_template(self, mocker):
mocked_COM = mocker.patch.object(mdp_files.AnnihilateVdwMdpBoundState,
'_create_COMCOM_pulling_strings',
return_value=['COM'])
mocked_free = mocker.patch.object(mdp_files.AnnihilateVdwMdpBoundState,
'_create_free_energy_strings',
return_value=['FREE'])
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.AnnihilateVdwMdpBoundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._create_template()
mocked_COM.assert_called_once()
mocked_free.assert_called_once()
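        # The expected template mirrors a complete GROMACS .mdp file; the
        # trailing 'FREE' and 'COM' entries come from the mocked
        # _create_free_energy_strings / _create_COMCOM_pulling_strings above.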
expected_mdp = [
'; VARIOUS PREPROCESSING OPTIONS',
'; Preprocessor information: use cpp syntax.',
'; e.g.: -I/home/joe/doe -I/home/mary/roe',
'include =',
'; e.g.: -DPOSRES -DFLEXIBLE (note these variable names are case sensitive)',
'define =', '', '; RUN CONTROL PARAMETERS',
'integrator = md', '; Start time and timestep in ps',
'tinit = 0',
f'dt = {timestep_ps}',
f'nsteps = {number_of_steps}',
'; For exact run continuation or redoing part of a run',
'init-step = 0',
'; Part index is updated automatically on checkpointing (keeps files separate)',
'simulation-part = 1',
'; mode for center of mass motion removal',
'comm-mode = Linear',
'; number of steps for center of mass motion removal',
'nstcomm = 100',
'; group(s) for center of mass motion removal',
'comm-grps =', '',
'; TEST PARTICLE INSERTION OPTIONS',
'rtpi = 0.05', '', '; OUTPUT CONTROL OPTIONS',
'; Output frequency for coords (x), velocities (v) and forces (f)',
'nstxout = 10000',
'nstvout = 10000',
'nstfout = 10000',
'; Output frequency for energies to log file and energy file',
'nstlog = 1000', 'nstcalcenergy = 50',
'nstenergy = 1000',
'; Output frequency and precision for .xtc file',
'nstxtcout = 2000',
'xtc-precision = 1000',
'; This selects the subset of atoms for the .xtc file. You can',
'; select multiple groups. By default all atoms will be written.',
'xtc-grps =', '; Selection of energy groups',
'energygrps = System', '',
'; NEIGHBORSEARCHING PARAMETERS',
'; cut-off scheme (group: using charge groups, Verlet: particle based cut-offs)',
'; nblist update frequency', 'cutoff-scheme = Verlet',
'nstlist = 20',
'verlet-buffer-tolerance = 0.0001',
'; ns algorithm (simple or grid)',
'ns_type = grid',
'; Periodic boundary conditions: xyz, no, xy',
'pbc = xyz', 'periodic-molecules = no',
'; Allowed energy drift due to the Verlet buffer in kJ/mol/ps per atom,',
'; a value of -1 means: use rlist', '; nblist cut-off',
'rlist = 1.0',
'; long-range cut-off for switched potentials',
'rlistlong = -1', '',
'; OPTIONS FOR ELECTROSTATICS AND VDW',
'; Method for doing electrostatics',
'coulombtype = PME', 'rcoulomb-switch = 0',
'rcoulomb = 1.0',
'; Relative dielectric constant for the medium and the reaction field',
'epsilon-r = 1', 'epsilon-rf = 0',
'; Method for doing Van der Waals',
'vdw-type = Cut-off', '; cut-off lengths',
'rvdw-switch = 0', 'rvdw = 1.0',
'; Apply long range dispersion corrections for Energy and Pressure',
'DispCorr = EnerPres',
'; Extension of the potential lookup tables beyond the cut-off',
'table-extension = 1',
'; Separate tables between energy group pairs',
'energygrp-table =',
'; Spacing for the PME/PPPM FFT grid',
'fourierspacing = 0.1',
'; FFT grid size, when a value is 0 fourierspacing will be used',
'fourier-nx = 0', 'fourier-ny = 0',
'fourier-nz = 0', '; EWALD/PME/PPPM parameters',
'pme-order = 4', 'ewald-rtol = 1e-05',
'ewald-geometry = 3d', 'epsilon-surface =',
'optimize-fft = no', '',
'; IMPLICIT SOLVENT ALGORITHM', 'implicit-solvent = No',
'', '; OPTIONS FOR WEAK COUPLING ALGORITHMS',
'; Temperature coupling', 'tcoupl = v-rescale',
'nsttcouple = -1', 'nh-chain-length = 1',
'; Groups to couple separately',
'tc-grps = System',
'; Time constant (ps) and reference temperature (K)',
'tau-t = 0.1',
f'ref-t = {temperature}', '; pressure coupling',
'pcoupl = Parrinello-Rahman',
'pcoupltype = Isotropic',
'nstpcouple = -1',
'; Time constant (ps), compressibility (1/bar) and reference P (bar)',
'tau-p = 1.0',
'compressibility = 4.6e-5',
'ref-p = 1',
'; Scaling of reference coordinates, No, All or COM',
'refcoord-scaling = COM', '',
'; GENERATE VELOCITIES FOR STARTUP RUN',
'gen-vel = no',
f'gen-temp = {temperature}',
'gen-seed = 173529', '', '; OPTIONS FOR BONDS',
'constraints = all-bonds',
'; Type of constraint algorithm',
'constraint-algorithm = Lincs',
'; Do not constrain the start configuration',
'continuation = yes',
'; Use successive overrelaxation to reduce the number of shake iterations',
'Shake-SOR = no', '; Relative tolerance of shake',
'shake-tol = 0.00001',
'; Highest order in the expansion of the constraint coupling matrix',
'lincs-order = 5',
'; Number of iterations in the final step of LINCS. 1 is fine for',
'; normal simulations, but use 2 to conserve energy in NVE runs.',
'; For energy minimization with constraints it should be 4 to 8.',
'lincs-iter = 2',
'; Lincs will write a warning to the stderr if in one step a bond',
'; rotates over more degrees than',
'lincs-warnangle = 30',
'; Convert harmonic bonds to morse potentials',
'morse = no', '', 'FREE', 'COM'
]
assert instance._template == expected_mdp
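    # The template tests in the three classes below repeat this .mdp body
    # almost verbatim; the substantive differences are nstlog (1000 for the
    # bound-state classes, 500 for the unbound-state ones) and the sign of the
    # default lambda step checked in the _hook tests.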
def test_hook_lambda_None(self, mocker):
mocked_template = \
mocker.patch.object(mdp_files.AnnihilateVdwMdpBoundState, '_create_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.AnnihilateVdwMdpBoundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._hook()
mocked_template.assert_called_once()
assert instance.lambda_steps == (-1. / number_of_steps)
def test_hook_lambda_input(self, mocker):
mocked_template = \
mocker.patch.object(mdp_files.AnnihilateVdwMdpBoundState, '_create_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = 33
COM_pull_goups = None
instance = mdp_files.AnnihilateVdwMdpBoundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._hook()
mocked_template.assert_called_once()
assert instance.lambda_steps == lambda_steps
class TestAnnihilateQMdpBoundState():
def test__create_template(self, mocker):
mocked_COM = mocker.patch.object(mdp_files.AnnihilateQMdpBoundState,
'_create_COMCOM_pulling_strings',
return_value=['COM'])
mocked_free = mocker.patch.object(mdp_files.AnnihilateQMdpBoundState,
'_create_free_energy_strings',
return_value=['FREE'])
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.AnnihilateQMdpBoundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._create_template()
mocked_COM.assert_called_once()
mocked_free.assert_called_once()
expected_mdp = [
'; VARIOUS PREPROCESSING OPTIONS',
'; Preprocessor information: use cpp syntax.',
'; e.g.: -I/home/joe/doe -I/home/mary/roe',
'include =',
'; e.g.: -DPOSRES -DFLEXIBLE (note these variable names are case sensitive)',
'define =', '', '; RUN CONTROL PARAMETERS',
'integrator = md', '; Start time and timestep in ps',
'tinit = 0',
f'dt = {timestep_ps}',
f'nsteps = {number_of_steps}',
'; For exact run continuation or redoing part of a run',
'init-step = 0',
'; Part index is updated automatically on checkpointing (keeps files separate)',
'simulation-part = 1',
'; mode for center of mass motion removal',
'comm-mode = Linear',
'; number of steps for center of mass motion removal',
'nstcomm = 100',
'; group(s) for center of mass motion removal',
'comm-grps =', '',
'; TEST PARTICLE INSERTION OPTIONS',
'rtpi = 0.05', '', '; OUTPUT CONTROL OPTIONS',
'; Output frequency for coords (x), velocities (v) and forces (f)',
'nstxout = 10000',
'nstvout = 10000',
'nstfout = 10000',
'; Output frequency for energies to log file and energy file',
'nstlog = 1000', 'nstcalcenergy = 50',
'nstenergy = 1000',
'; Output frequency and precision for .xtc file',
'nstxtcout = 2000',
'xtc-precision = 1000',
'; This selects the subset of atoms for the .xtc file. You can',
'; select multiple groups. By default all atoms will be written.',
'xtc-grps =', '; Selection of energy groups',
'energygrps = System', '',
'; NEIGHBORSEARCHING PARAMETERS',
'; cut-off scheme (group: using charge groups, Verlet: particle based cut-offs)',
'; nblist update frequency', 'cutoff-scheme = Verlet',
'nstlist = 20',
'verlet-buffer-tolerance = 0.0001',
'; ns algorithm (simple or grid)',
'ns_type = grid',
'; Periodic boundary conditions: xyz, no, xy',
'pbc = xyz', 'periodic-molecules = no',
'; Allowed energy drift due to the Verlet buffer in kJ/mol/ps per atom,',
'; a value of -1 means: use rlist', '; nblist cut-off',
'rlist = 1.0',
'; long-range cut-off for switched potentials',
'rlistlong = -1', '',
'; OPTIONS FOR ELECTROSTATICS AND VDW',
'; Method for doing electrostatics',
'coulombtype = PME', 'rcoulomb-switch = 0',
'rcoulomb = 1.0',
'; Relative dielectric constant for the medium and the reaction field',
'epsilon-r = 1', 'epsilon-rf = 0',
'; Method for doing Van der Waals',
'vdw-type = Cut-off', '; cut-off lengths',
'rvdw-switch = 0', 'rvdw = 1.0',
'; Apply long range dispersion corrections for Energy and Pressure',
'DispCorr = EnerPres',
'; Extension of the potential lookup tables beyond the cut-off',
'table-extension = 1',
'; Separate tables between energy group pairs',
'energygrp-table =',
'; Spacing for the PME/PPPM FFT grid',
'fourierspacing = 0.1',
'; FFT grid size, when a value is 0 fourierspacing will be used',
'fourier-nx = 0', 'fourier-ny = 0',
'fourier-nz = 0', '; EWALD/PME/PPPM parameters',
'pme-order = 4', 'ewald-rtol = 1e-05',
'ewald-geometry = 3d', 'epsilon-surface =',
'optimize-fft = no', '',
'; IMPLICIT SOLVENT ALGORITHM', 'implicit-solvent = No',
'', '; OPTIONS FOR WEAK COUPLING ALGORITHMS',
'; Temperature coupling', 'tcoupl = v-rescale',
'nsttcouple = -1', 'nh-chain-length = 1',
'; Groups to couple separately',
'tc-grps = System',
'; Time constant (ps) and reference temperature (K)',
'tau-t = 0.1',
f'ref-t = {temperature}', '; pressure coupling',
'pcoupl = Parrinello-Rahman',
'pcoupltype = Isotropic',
'nstpcouple = -1',
'; Time constant (ps), compressibility (1/bar) and reference P (bar)',
'tau-p = 1.0',
'compressibility = 4.6e-5',
'ref-p = 1',
'; Scaling of reference coordinates, No, All or COM',
'refcoord-scaling = COM', '',
'; GENERATE VELOCITIES FOR STARTUP RUN',
'gen-vel = no',
f'gen-temp = {temperature}',
'gen-seed = 173529', '', '; OPTIONS FOR BONDS',
'constraints = all-bonds',
'; Type of constraint algorithm',
'constraint-algorithm = Lincs',
'; Do not constrain the start configuration',
'continuation = yes',
'; Use successive overrelaxation to reduce the number of shake iterations',
'Shake-SOR = no', '; Relative tolerance of shake',
'shake-tol = 0.00001',
'; Highest order in the expansion of the constraint coupling matrix',
'lincs-order = 5',
'; Number of iterations in the final step of LINCS. 1 is fine for',
'; normal simulations, but use 2 to conserve energy in NVE runs.',
'; For energy minimization with constraints it should be 4 to 8.',
'lincs-iter = 2',
'; Lincs will write a warning to the stderr if in one step a bond',
'; rotates over more degrees than',
'lincs-warnangle = 30',
'; Convert harmonic bonds to morse potentials',
'morse = no', '', 'FREE', 'COM'
]
assert instance._template == expected_mdp
def test_hook_lambda_None(self, mocker):
mocked_template = \
mocker.patch.object(mdp_files.AnnihilateQMdpBoundState, '_create_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.AnnihilateQMdpBoundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._hook()
mocked_template.assert_called_once()
assert instance.lambda_steps == (-1. / number_of_steps)
def test_hook_lambda_input(self, mocker):
mocked_template = \
mocker.patch.object(mdp_files.AnnihilateQMdpBoundState, '_create_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = 33
COM_pull_goups = None
instance = mdp_files.AnnihilateQMdpBoundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._hook()
mocked_template.assert_called_once()
assert instance.lambda_steps == lambda_steps
class TestCreateVdwMdpUnboundState():
def test__create_template(self, mocker):
mocked_COM = mocker.patch.object(mdp_files.CreateVdwMdpUnboundState,
'_create_COMCOM_pulling_strings',
return_value=['COM'])
mocked_free = mocker.patch.object(mdp_files.CreateVdwMdpUnboundState,
'_create_free_energy_strings',
return_value=['FREE'])
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.CreateVdwMdpUnboundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._create_template()
mocked_COM.assert_called_once()
mocked_free.assert_called_once()
expected_mdp = [
'; VARIOUS PREPROCESSING OPTIONS',
'; Preprocessor information: use cpp syntax.',
'; e.g.: -I/home/joe/doe -I/home/mary/roe',
'include =',
'; e.g.: -DPOSRES -DFLEXIBLE (note these variable names are case sensitive)',
'define =', '', '; RUN CONTROL PARAMETERS',
'integrator = md', '; Start time and timestep in ps',
'tinit = 0',
f'dt = {timestep_ps}',
f'nsteps = {number_of_steps}',
'; For exact run continuation or redoing part of a run',
'init-step = 0',
'; Part index is updated automatically on checkpointing (keeps files separate)',
'simulation-part = 1',
'; mode for center of mass motion removal',
'comm-mode = Linear',
'; number of steps for center of mass motion removal',
'nstcomm = 100',
'; group(s) for center of mass motion removal',
'comm-grps =', '',
'; TEST PARTICLE INSERTION OPTIONS',
'rtpi = 0.05', '', '; OUTPUT CONTROL OPTIONS',
'; Output frequency for coords (x), velocities (v) and forces (f)',
'nstxout = 10000',
'nstvout = 10000',
'nstfout = 10000',
'; Output frequency for energies to log file and energy file',
'nstlog = 500', 'nstcalcenergy = 50',
'nstenergy = 1000',
'; Output frequency and precision for .xtc file',
'nstxtcout = 2000',
'xtc-precision = 1000',
'; This selects the subset of atoms for the .xtc file. You can',
'; select multiple groups. By default all atoms will be written.',
'xtc-grps =', '; Selection of energy groups',
'energygrps = System', '',
'; NEIGHBORSEARCHING PARAMETERS',
'; cut-off scheme (group: using charge groups, Verlet: particle based cut-offs)',
'; nblist update frequency', 'cutoff-scheme = Verlet',
'nstlist = 20',
'verlet-buffer-tolerance = 0.0001',
'; ns algorithm (simple or grid)',
'ns_type = grid',
'; Periodic boundary conditions: xyz, no, xy',
'pbc = xyz', 'periodic-molecules = no',
'; Allowed energy drift due to the Verlet buffer in kJ/mol/ps per atom,',
'; a value of -1 means: use rlist', '; nblist cut-off',
'rlist = 1.0',
'; long-range cut-off for switched potentials',
'rlistlong = -1', '',
'; OPTIONS FOR ELECTROSTATICS AND VDW',
'; Method for doing electrostatics',
'coulombtype = PME', 'rcoulomb-switch = 0',
'rcoulomb = 1.0',
'; Relative dielectric constant for the medium and the reaction field',
'epsilon-r = 1', 'epsilon-rf = 0',
'; Method for doing Van der Waals',
'vdw-type = Cut-off', '; cut-off lengths',
'rvdw-switch = 0', 'rvdw = 1.0',
'; Apply long range dispersion corrections for Energy and Pressure',
'DispCorr = EnerPres',
'; Extension of the potential lookup tables beyond the cut-off',
'table-extension = 1',
'; Separate tables between energy group pairs',
'energygrp-table =',
'; Spacing for the PME/PPPM FFT grid',
'fourierspacing = 0.1',
'; FFT grid size, when a value is 0 fourierspacing will be used',
'fourier-nx = 0', 'fourier-ny = 0',
'fourier-nz = 0', '; EWALD/PME/PPPM parameters',
'pme-order = 4', 'ewald-rtol = 1e-05',
'ewald-geometry = 3d', 'epsilon-surface =',
'optimize-fft = no', '',
'; IMPLICIT SOLVENT ALGORITHM', 'implicit-solvent = No',
'', '; OPTIONS FOR WEAK COUPLING ALGORITHMS',
'; Temperature coupling', 'tcoupl = v-rescale',
'nsttcouple = -1', 'nh-chain-length = 1',
'; Groups to couple separately',
'tc-grps = System',
'; Time constant (ps) and reference temperature (K)',
'tau-t = 0.1',
f'ref-t = {temperature}', '; pressure coupling',
'pcoupl = Parrinello-Rahman',
'pcoupltype = Isotropic',
'nstpcouple = -1',
'; Time constant (ps), compressibility (1/bar) and reference P (bar)',
'tau-p = 1.0',
'compressibility = 4.6e-5',
'ref-p = 1',
'; Scaling of reference coordinates, No, All or COM',
'refcoord-scaling = COM', '',
'; GENERATE VELOCITIES FOR STARTUP RUN',
'gen-vel = no',
f'gen-temp = {temperature}',
'gen-seed = 173529', '', '; OPTIONS FOR BONDS',
'constraints = all-bonds',
'; Type of constraint algorithm',
'constraint-algorithm = Lincs',
'; Do not constrain the start configuration',
'continuation = yes',
'; Use successive overrelaxation to reduce the number of shake iterations',
'Shake-SOR = no', '; Relative tolerance of shake',
'shake-tol = 0.00001',
'; Highest order in the expansion of the constraint coupling matrix',
'lincs-order = 5',
'; Number of iterations in the final step of LINCS. 1 is fine for',
'; normal simulations, but use 2 to conserve energy in NVE runs.',
'; For energy minimization with constraints it should be 4 to 8.',
'lincs-iter = 2',
'; Lincs will write a warning to the stderr if in one step a bond',
'; rotates over more degrees than',
'lincs-warnangle = 30',
'; Convert harmonic bonds to morse potentials',
'morse = no', '', 'FREE', 'COM'
]
assert instance._template == expected_mdp
def test_hook_lambda_None(self, mocker):
mocked_template = \
mocker.patch.object(mdp_files.CreateVdwMdpUnboundState, '_create_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.CreateVdwMdpUnboundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._hook()
mocked_template.assert_called_once()
assert instance.lambda_steps == (1. / number_of_steps)
def test_hook_lambda_input(self, mocker):
mocked_template = \
mocker.patch.object(mdp_files.CreateVdwMdpUnboundState, '_create_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = 33
COM_pull_goups = None
instance = mdp_files.CreateVdwMdpUnboundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._hook()
mocked_template.assert_called_once()
assert instance.lambda_steps == lambda_steps
class TestCreateQMdpUnboundState():
def test__create_template(self, mocker):
mocked_COM = mocker.patch.object(mdp_files.CreateQMdpUnboundState,
'_create_COMCOM_pulling_strings',
return_value=['COM'])
mocked_free = mocker.patch.object(mdp_files.CreateQMdpUnboundState,
'_create_free_energy_strings',
return_value=['FREE'])
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.CreateQMdpUnboundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._create_template()
mocked_COM.assert_called_once()
mocked_free.assert_called_once()
expected_mdp = [
'; VARIOUS PREPROCESSING OPTIONS',
'; Preprocessor information: use cpp syntax.',
'; e.g.: -I/home/joe/doe -I/home/mary/roe',
'include =',
'; e.g.: -DPOSRES -DFLEXIBLE (note these variable names are case sensitive)',
'define =', '', '; RUN CONTROL PARAMETERS',
'integrator = md', '; Start time and timestep in ps',
'tinit = 0',
f'dt = {timestep_ps}',
f'nsteps = {number_of_steps}',
'; For exact run continuation or redoing part of a run',
'init-step = 0',
'; Part index is updated automatically on checkpointing (keeps files separate)',
'simulation-part = 1',
'; mode for center of mass motion removal',
'comm-mode = Linear',
'; number of steps for center of mass motion removal',
'nstcomm = 100',
'; group(s) for center of mass motion removal',
'comm-grps =', '',
'; TEST PARTICLE INSERTION OPTIONS',
'rtpi = 0.05', '', '; OUTPUT CONTROL OPTIONS',
'; Output frequency for coords (x), velocities (v) and forces (f)',
'nstxout = 10000',
'nstvout = 10000',
'nstfout = 10000',
'; Output frequency for energies to log file and energy file',
'nstlog = 500', 'nstcalcenergy = 50',
'nstenergy = 1000',
'; Output frequency and precision for .xtc file',
'nstxtcout = 2000',
'xtc-precision = 1000',
'; This selects the subset of atoms for the .xtc file. You can',
'; select multiple groups. By default all atoms will be written.',
'xtc-grps =', '; Selection of energy groups',
'energygrps = System', '',
'; NEIGHBORSEARCHING PARAMETERS',
'; cut-off scheme (group: using charge groups, Verlet: particle based cut-offs)',
'; nblist update frequency', 'cutoff-scheme = Verlet',
'nstlist = 20',
'verlet-buffer-tolerance = 0.0001',
'; ns algorithm (simple or grid)',
'ns_type = grid',
'; Periodic boundary conditions: xyz, no, xy',
'pbc = xyz', 'periodic-molecules = no',
'; Allowed energy drift due to the Verlet buffer in kJ/mol/ps per atom,',
'; a value of -1 means: use rlist', '; nblist cut-off',
'rlist = 1.0',
'; long-range cut-off for switched potentials',
'rlistlong = -1', '',
'; OPTIONS FOR ELECTROSTATICS AND VDW',
'; Method for doing electrostatics',
'coulombtype = PME', 'rcoulomb-switch = 0',
'rcoulomb = 1.0',
'; Relative dielectric constant for the medium and the reaction field',
'epsilon-r = 1', 'epsilon-rf = 0',
'; Method for doing Van der Waals',
'vdw-type = Cut-off', '; cut-off lengths',
'rvdw-switch = 0', 'rvdw = 1.0',
'; Apply long range dispersion corrections for Energy and Pressure',
'DispCorr = EnerPres',
'; Extension of the potential lookup tables beyond the cut-off',
'table-extension = 1',
'; Separate tables between energy group pairs',
'energygrp-table =',
'; Spacing for the PME/PPPM FFT grid',
'fourierspacing = 0.1',
'; FFT grid size, when a value is 0 fourierspacing will be used',
'fourier-nx = 0', 'fourier-ny = 0',
'fourier-nz = 0', '; EWALD/PME/PPPM parameters',
'pme-order = 4', 'ewald-rtol = 1e-05',
'ewald-geometry = 3d', 'epsilon-surface =',
'optimize-fft = no', '',
'; IMPLICIT SOLVENT ALGORITHM', 'implicit-solvent = No',
'', '; OPTIONS FOR WEAK COUPLING ALGORITHMS',
'; Temperature coupling', 'tcoupl = v-rescale',
'nsttcouple = -1', 'nh-chain-length = 1',
'; Groups to couple separately',
'tc-grps = System',
'; Time constant (ps) and reference temperature (K)',
'tau-t = 0.1',
f'ref-t = {temperature}', '; pressure coupling',
'pcoupl = Parrinello-Rahman',
'pcoupltype = Isotropic',
'nstpcouple = -1',
'; Time constant (ps), compressibility (1/bar) and reference P (bar)',
'tau-p = 1.0',
'compressibility = 4.6e-5',
'ref-p = 1',
'; Scaling of reference coordinates, No, All or COM',
'refcoord-scaling = COM', '',
'; GENERATE VELOCITIES FOR STARTUP RUN',
'gen-vel = no',
f'gen-temp = {temperature}',
'gen-seed = 173529', '', '; OPTIONS FOR BONDS',
'constraints = all-bonds',
'; Type of constraint algorithm',
'constraint-algorithm = Lincs',
'; Do not constrain the start configuration',
'continuation = yes',
'; Use successive overrelaxation to reduce the number of shake iterations',
'Shake-SOR = no', '; Relative tolerance of shake',
'shake-tol = 0.00001',
'; Highest order in the expansion of the constraint coupling matrix',
'lincs-order = 5',
'; Number of iterations in the final step of LINCS. 1 is fine for',
'; normal simulations, but use 2 to conserve energy in NVE runs.',
'; For energy minimization with constraints it should be 4 to 8.',
'lincs-iter = 2',
'; Lincs will write a warning to the stderr if in one step a bond',
'; rotates over more degrees than',
'lincs-warnangle = 30',
'; Convert harmonic bonds to morse potentials',
'morse = no', '', 'FREE', 'COM'
]
assert instance._template == expected_mdp
def test_hook_lambda_None(self, mocker):
mocked_template = \
mocker.patch.object(mdp_files.CreateQMdpUnboundState, '_create_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = None
COM_pull_goups = None
instance = mdp_files.CreateQMdpUnboundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._hook()
mocked_template.assert_called_once()
assert instance.lambda_steps == (1. / number_of_steps)
def test_hook_lambda_input(self, mocker):
mocked_template = \
mocker.patch.object(mdp_files.CreateQMdpUnboundState, '_create_template')
mdp_file = 'mdp.mdp'
alchemical_molecule = 'alc'
timestep_ps = 0.002
number_of_steps = 1000
temperature = 297.20
lambda_steps = 33
COM_pull_goups = None
instance = mdp_files.CreateQMdpUnboundState(
mdp_file, alchemical_molecule, timestep_ps, number_of_steps,
temperature, lambda_steps, COM_pull_goups)
instance._hook()
mocked_template.assert_called_once()
assert instance.lambda_steps == lambda_steps
| 45.929763
| 93
| 0.487307
| 4,919
| 52,314
| 5.010571
| 0.094531
| 0.020773
| 0.030186
| 0.024993
| 0.930945
| 0.926117
| 0.912484
| 0.903031
| 0.903031
| 0.902422
| 0
| 0.029644
| 0.415147
| 52,314
| 1,138
| 94
| 45.970123
| 0.775918
| 0.008258
| 0
| 0.874214
| 0
| 0.004193
| 0.452605
| 0.01508
| 0
| 0
| 0
| 0
| 0.045073
| 1
| 0.025157
| false
| 0
| 0.002096
| 0
| 0.034591
| 0.004193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a37942e26802e5405ec73f2e27a76f1fee46cc8d
| 2,320
|
py
|
Python
|
isiscb/isisdata/migrations/0016_auto_20160424_1654.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 4
|
2016-01-25T20:35:33.000Z
|
2020-04-07T15:39:52.000Z
|
isiscb/isisdata/migrations/0016_auto_20160424_1654.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 41
|
2015-08-19T17:34:41.000Z
|
2022-03-11T23:19:01.000Z
|
isiscb/isisdata/migrations/0016_auto_20160424_1654.py
|
bgopalachary/IsisCB
|
c28e3f504eea60ebeff38318d8bb2071abb28ebb
|
[
"MIT"
] | 2
|
2020-11-25T20:18:18.000Z
|
2021-06-24T15:15:41.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-24 16:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('isisdata', '0015_auto_20160424_1649'),
]
operations = [
migrations.AlterField(
model_name='citation',
name='abstract',
field=models.TextField(blank=True, help_text=b'Abstract or detailed summaries of a work.', null=True),
),
migrations.AlterField(
model_name='citation',
name='edition_details',
field=models.TextField(blank=True, help_text=b'Use for describing the edition or version of the resource. Include names of additional contributors if necessary for clarification (such as translators, introduction by, etc). Always, use relationship table to list contributors (even if they are specified here).', null=True),
),
migrations.AlterField(
model_name='citation',
name='physical_details',
field=models.CharField(blank=True, help_text=b'For describing the physical description of the resource. Use whatever information is appropriate for the type of resource.', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalcitation',
name='abstract',
field=models.TextField(blank=True, help_text=b'Abstract or detailed summaries of a work.', null=True),
),
migrations.AlterField(
model_name='historicalcitation',
name='edition_details',
field=models.TextField(blank=True, help_text=b'Use for describing the edition or version of the resource. Include names of additional contributors if necessary for clarification (such as translators, introduction by, etc). Always, use relationship table to list contributors (even if they are specified here).', null=True),
),
migrations.AlterField(
model_name='historicalcitation',
name='physical_details',
field=models.CharField(blank=True, help_text=b'For describing the physical description of the resource. Use whatever information is appropriate for the type of resource.', max_length=255, null=True),
),
]
| 50.434783
| 335
| 0.677586
| 275
| 2,320
| 5.621818
| 0.338182
| 0.07762
| 0.097025
| 0.112549
| 0.875809
| 0.875809
| 0.851876
| 0.851876
| 0.78784
| 0.78784
| 0
| 0.021336
| 0.232328
| 2,320
| 45
| 336
| 51.555556
| 0.846715
| 0.028879
| 0
| 0.789474
| 1
| 0.052632
| 0.460889
| 0.010222
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6e979e245029e8f802c7f44702d381d80380d82c
| 5,474
|
py
|
Python
|
tests/test_linked_list.py
|
betandr/algorithms_plus_data_structures
|
579b57954f487eb977b7419d91797d441a0c4584
|
[
"MIT"
] | null | null | null |
tests/test_linked_list.py
|
betandr/algorithms_plus_data_structures
|
579b57954f487eb977b7419d91797d441a0c4584
|
[
"MIT"
] | null | null | null |
tests/test_linked_list.py
|
betandr/algorithms_plus_data_structures
|
579b57954f487eb977b7419d91797d441a0c4584
|
[
"MIT"
] | null | null | null |
import unittest
from structures.linked_list import Node, LinkedList
class TestLinkedList(unittest.TestCase):
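    # The assertions rely on LinkedList.linked_list_as_string(), which renders
    # the list as "HEAD -> v1 -> ... -> TAIL", and on the private _counter
    # attribute tracking the number of nodes.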
def setUp(self):
self._linked_list = LinkedList()
def test_is_empty(self):
assert(self._linked_list.is_empty())
self._linked_list.add_head(Node(1))
self.assertFalse(self._linked_list.is_empty())
def test_empty_linked_list(self):
empty = self._linked_list.linked_list_as_string()
assert(self._linked_list._counter == 0)
assert(empty == "HEAD -> TAIL")
def test_add_node_to_head(self):
self._linked_list.add_head(Node(1))
assert(self._linked_list._counter == 1)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 1 -> TAIL")
def test_add_two_nodes_to_head(self):
self._linked_list.add_head(Node(1))
self._linked_list.add_head(Node(2))
assert(self._linked_list._counter == 2)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 2 -> 1 -> TAIL")
def test_add_node_to_tail_of_empty_list(self):
self._linked_list.add_head(Node(1))
assert(self._linked_list._counter == 1)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 1 -> TAIL")
def test_add_node_to_tail_of_list_with_items(self):
self._linked_list.add_head(Node(1))
self._linked_list.add_head(Node(2))
assert(self._linked_list._counter == 2)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 2 -> 1 -> TAIL")
self._linked_list.add_tail(Node(0))
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 2 -> 1 -> 0 -> TAIL")
def test_add_multiple_tails(self):
self._linked_list.add_tail(Node(1))
self._linked_list.add_tail(Node(2))
self._linked_list.add_tail(Node(3))
self._linked_list.add_tail(Node(4))
assert(self._linked_list._counter == 4)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 1 -> 2 -> 3 -> 4 -> TAIL")
def test_remove_tail_from_linked_list_with_one_item(self):
self._linked_list.add_head(Node(1))
assert(self._linked_list._counter == 1)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 1 -> TAIL")
self._linked_list.remove_tail()
assert(self._linked_list._counter == 0)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> TAIL")
def test_remove_tail_from_linked_list_with_two_items(self):
self._linked_list.add_head(Node(1))
self._linked_list.add_head(Node(2))
assert(self._linked_list._counter == 2)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 2 -> 1 -> TAIL")
self._linked_list.remove_tail()
assert(self._linked_list._counter == 1)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 2 -> TAIL")
def test_remove_tail_from_linked_list_with_three_items(self):
self._linked_list.add_head(Node(1))
self._linked_list.add_head(Node(2))
self._linked_list.add_head(Node(3))
assert(self._linked_list._counter == 3)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 3 -> 2 -> 1 -> TAIL")
self._linked_list.remove_tail()
assert(self._linked_list._counter == 2)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 3 -> 2 -> TAIL")
def test_remove_head_from_linked_list_with_one_item(self):
self._linked_list.add_head(Node(1))
assert(self._linked_list._counter == 1)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 1 -> TAIL")
self._linked_list.remove_head()
assert(self._linked_list._counter == 0)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> TAIL")
def test_remove_head_from_linked_list_with_two_items(self):
self._linked_list.add_head(Node(1))
self._linked_list.add_head(Node(2))
assert(self._linked_list._counter == 2)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 2 -> 1 -> TAIL")
self._linked_list.remove_head()
assert(self._linked_list._counter == 1)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 1 -> TAIL")
def test_remove_head_from_linked_list_with_three_items(self):
self._linked_list.add_head(Node(1))
self._linked_list.add_head(Node(2))
self._linked_list.add_head(Node(3))
assert(self._linked_list._counter == 3)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 3 -> 2 -> 1 -> TAIL")
self._linked_list.remove_head()
assert(self._linked_list._counter == 2)
s = self._linked_list.linked_list_as_string()
assert(s == "HEAD -> 2 -> 1 -> TAIL")
def test_get_enumerator(self):
self._linked_list.add_head(Node(1))
self._linked_list.add_head(Node(2))
self._linked_list.add_head(Node(3))
assert(self._linked_list._counter == 3)
enumerator = self._linked_list.enumerator()
node = enumerator.get_next()
assert(node.value == 3)
node = enumerator.get_next()
assert(node.value == 2)
node = enumerator.get_next()
assert(node.value == 1)
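The tests above exercise a linked-list API (`Node`, `add_head`, `add_tail`, `remove_head`, `remove_tail`, `linked_list_as_string`, `enumerator`, and a private `_counter`) whose implementation is not included in this row. Below is a minimal sketch of a singly linked list that would satisfy these assertions; it is a hypothetical reconstruction, not the repository's actual code.

# Hypothetical reconstruction of the classes the tests above assume.
class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class Enumerator:
    """Walks the list from head to tail, one node per get_next() call."""
    def __init__(self, head):
        self._current = head

    def get_next(self):
        node = self._current
        if node is not None:
            self._current = node.next
        return node

class LinkedList:
    def __init__(self):
        self._head = None
        self._counter = 0

    def add_head(self, node):
        node.next = self._head
        self._head = node
        self._counter += 1

    def add_tail(self, node):
        node.next = None
        if self._head is None:
            self._head = node
        else:
            current = self._head
            while current.next is not None:
                current = current.next
            current.next = node
        self._counter += 1

    def remove_head(self):
        if self._head is not None:
            self._head = self._head.next
            self._counter -= 1

    def remove_tail(self):
        if self._head is None:
            return
        if self._head.next is None:
            self._head = None
        else:
            current = self._head
            while current.next.next is not None:
                current = current.next
            current.next = None
        self._counter -= 1

    def linked_list_as_string(self):
        parts = ["HEAD"]
        current = self._head
        while current is not None:
            parts.append(str(current.value))
            current = current.next
        parts.append("TAIL")
        return " -> ".join(parts)

    def enumerator(self):
        return Enumerator(self._head)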
| 39.956204
| 65
| 0.642674
| 778
| 5,474
| 4.075835
| 0.060411
| 0.321665
| 0.331126
| 0.144749
| 0.899401
| 0.87638
| 0.82813
| 0.794071
| 0.772627
| 0.738568
| 0
| 0.018926
| 0.227804
| 5,474
| 136
| 66
| 40.25
| 0.731251
| 0
| 0
| 0.686441
| 0
| 0
| 0.069967
| 0
| 0
| 0
| 0
| 0
| 0.364407
| 1
| 0.127119
| false
| 0
| 0.016949
| 0
| 0.152542
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 6ea2bf591294feef8e5c6547a05e7ccd9a5a3697
| 35
| py
| Python
| models/__init__.py
| zhigangjiang/LGT-Net
| d9a619158b2dc66a50c100e7fa7e491f1df16fd7
| ["MIT"] | 11
| 2022-03-03T17:49:33.000Z
| 2022-03-25T11:23:11.000Z
| models/__init__.py
| zhigangjiang/LGT-Net
| d9a619158b2dc66a50c100e7fa7e491f1df16fd7
| ["MIT"] | null | null | null
| models/__init__.py
| zhigangjiang/LGT-Net
| d9a619158b2dc66a50c100e7fa7e491f1df16fd7
| ["MIT"] | 1
| 2022-03-04T06:39:50.000Z
| 2022-03-04T06:39:50.000Z
|
from models.lgt_net import LGT_Net
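This one-line `models/__init__.py` re-exports `LGT_Net` at the package level, so downstream code can import it from the package root instead of reaching into the submodule. A hypothetical consumer-side usage (assumes the `models` package is on the import path):

from models import LGT_Net  # resolves to models.lgt_net.LGT_Net via the re-export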
| 17.5
| 34
| 0.857143
| 7
| 35
| 4
| 0.714286
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| 42e8f796377a92d846078d7453d6c0a8e25f6e38
| 62
| py
| Python
| average_speed_radar/camera_integration/CameraIntegration.py
| fnoquiq/average_speed_radar-radar_module
| f424c3a47a877ffeca98c8b1510fdbdb7096f69e
| ["MIT"] | 3
| 2020-05-11T00:55:54.000Z
| 2021-11-27T15:14:55.000Z
| average_speed_radar/camera_integration/CameraIntegration.py
| fnoquiq/average_speed_radar-radar_module
| f424c3a47a877ffeca98c8b1510fdbdb7096f69e
| ["MIT"] | 5
| 2019-05-13T02:40:25.000Z
| 2019-06-06T02:17:50.000Z
| average_speed_radar/camera_integration/CameraIntegration.py
| fnoquiq/average_speed_radar
| f424c3a47a877ffeca98c8b1510fdbdb7096f69e
| ["MIT"] | 1
| 2020-04-12T19:46:35.000Z
| 2020-04-12T19:46:35.000Z
|
import cv2
def get_camera():
return cv2.VideoCapture(0)
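`get_camera()` wraps OpenCV's `cv2.VideoCapture` on device index 0 (the default camera). A hedged usage sketch, assuming OpenCV (`opencv-python`) is installed and a camera is attached:

camera = get_camera()
if camera.isOpened():              # False on headless machines or when device 0 is absent
    ok, frame = camera.read()      # ok is False when no frame could be grabbed
    if ok:
        print("captured frame with shape", frame.shape)
camera.release()                   # always release the device handle when done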
| 10.333333
| 30
| 0.709677
| 9
| 62
| 4.777778
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06
| 0.193548
| 62
| 5
| 31
| 12.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 7
| 2852bf3f763ec3b39ed59acfc30afc6c95777ae4
| 68,629
| py
| Python
| tests/test_client_async.py
| electronhead/whendo
| 27112834be0935b5b0f7ade4316e35532532e047
| ["MIT"] | 1
| 2022-03-04T09:25:13.000Z
| 2022-03-04T09:25:13.000Z
| tests/test_client_async.py
| electronhead/whendo
| 27112834be0935b5b0f7ade4316e35532532e047
| ["MIT"] | null | null | null
| tests/test_client_async.py
| electronhead/whendo
| 27112834be0935b5b0f7ade4316e35532532e047
| ["MIT"] | null | null | null
|
"""
These test cases run the API within each test function call, allowing HTTP calls
to a live, albeit short-lived, whendo server.
These tests and the fixture, startup_and_shutdown_uvicorn, rely on asynchronous processing.
"""
import time
import pytest
from datetime import timedelta
from pydantic import BaseModel
from httpx import AsyncClient
from whendo.core.action import Action
import whendo.core.actions.file_action as file_x
import whendo.core.actions.dispatch_action as disp_x
import whendo.core.actions.http_action as http_x
import whendo.core.actions.sys_action as sys_x
from whendo.core.actions.list_action import All, Success, Vals
from whendo.core.actions.sys_action import SysInfo
from whendo.core.scheduler import Scheduler, Immediately
from whendo.core.schedulers.timed_scheduler import Timely
from whendo.core.dispatcher import Dispatcher
from whendo.core.program import Program
from whendo.core.programs.simple_program import PBEProgram
from whendo.core.server import Server
from whendo.core.util import (
FilePathe,
resolve_instance,
DateTime,
Now,
Http,
DateTime2,
Rez,
)
from whendo.core.resolver import (
resolve_action,
resolve_scheduler,
resolve_file_pathe,
resolve_server,
)
from .fixtures import port, host, startup_and_shutdown_uvicorn
from .client_async import ClientAsync
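# The fixtures module imported above is not included in this dump. As an
# illustration only (an assumption, not the repository's actual fixture), a
# start/stop uvicorn fixture of this kind is commonly written along these
# lines, with "whendo.api.main:app" standing in for the real application path:
#
#   @pytest.fixture
#   async def startup_and_shutdown_uvicorn(host, port):
#       config = uvicorn.Config("whendo.api.main:app", host=host, port=port)
#       server = uvicorn.Server(config)
#       task = asyncio.create_task(server.serve())
#       yield
#       server.should_exit = True
#       await task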
@pytest.mark.asyncio
async def test_client_1(startup_and_shutdown_uvicorn, host, port, tmp_path):
"""
add action and scheduler
"""
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
output_file = str(tmp_path / "output.txt")
action = file_x.FileAppend(
relative_to_output_dir=False, file=output_file, payload={"show": "two"}
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
@pytest.mark.asyncio
async def test_client_2(startup_and_shutdown_uvicorn, host, port, tmp_path):
"""
add action and scheduler and then schedule the action
"""
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
output_file = str(tmp_path / "output.txt")
action = file_x.FileAppend(
relative_to_output_dir=False, file=output_file, payload={"show": "two"}
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo", scheduler_name="bar")
await assert_job_count(client=client, n=1)
@pytest.mark.asyncio
async def test_client_3(startup_and_shutdown_uvicorn, host, port, tmp_path):
"""
add action and scheduler, run timed, and then make sure the action produced file output
"""
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
output_file = str(tmp_path / "output.txt")
action = file_x.FileAppend(
relative_to_output_dir=False, file=output_file, payload={"show": "two"}
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo", scheduler_name="bar")
await assert_job_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
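# The open/readlines/assert idiom above recurs throughout this module. A small
# helper (a sketch added for illustration; not part of the original file) could
# express the repeated check once:
def _assert_file_has_lines(path):
    """Assert that the file at `path` exists and contains at least one line."""
    with open(path, "r") as fid:
        lines = fid.readlines()
    assert isinstance(lines, list) and len(lines) >= 1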
@pytest.mark.asyncio
async def test_client_logic_action(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" Run two actions within one action. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = All(actions=[action1, action2])
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action3)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo", scheduler_name="bar")
await assert_job_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
lines = None
with open(action2.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
@pytest.mark.asyncio
async def test_set_action_1(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" unschedule an action. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action1)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo", scheduler_name="bar")
await assert_job_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
await set_action(client=client, action_name="foo", action=action2)
await assert_job_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action2.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
@pytest.mark.asyncio
async def test_unschedule_scheduler(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" unschedule a scheduler. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo1", scheduler_name="bar")
await schedule_action(client=client, action_name="foo2", scheduler_name="bar")
await assert_job_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=2)
await unschedule_scheduler(client=client, scheduler_name="bar")
await assert_job_count(client=client, n=0)
await assert_scheduled_action_count(client=client, n=0)
await get_action(client=client, action_name="foo1")
await get_action(client=client, action_name="foo2")
await get_scheduler(client=client, scheduler_name="bar")
@pytest.mark.asyncio
async def test_unschedule_all_schedulers(
startup_and_shutdown_uvicorn, host, port, tmp_path
):
""" unschedule a scheduler. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo1", scheduler_name="bar")
await schedule_action(client=client, action_name="foo2", scheduler_name="bar")
await assert_job_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=2)
await unschedule_all(client=client)
await assert_job_count(client=client, n=0)
await assert_scheduled_action_count(client=client, n=0)
await get_action(client=client, action_name="foo1")
await get_action(client=client, action_name="foo2")
await get_scheduler(client=client, scheduler_name="bar")
@pytest.mark.asyncio
async def test_clear_all_scheduling(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" clear all scheduling. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output3.txt"),
payload={"show": "two"},
)
action4 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output4.txt"),
payload={"show": "two"},
)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_action(client=client, action_name="foo3", action=action3)
await add_action(client=client, action_name="foo4", action=action4)
scheduler = Timely(interval=1)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await add_scheduler(
client=client, scheduler_name="immediately", scheduler=Immediately()
)
program = PBEProgram().prologue("foo4").epilogue("foo3").body_element("bar", "foo2")
await add_program(client=client, program_name="blink", program=program)
await schedule_action(client=client, action_name="foo1", scheduler_name="bar")
await defer_action(
client=client,
action_name="foo2",
scheduler_name="bar",
wait_until=DateTime(dt=Now.dt() + timedelta(seconds=10)),
)
await expire_action(
client=client,
action_name="foo2",
scheduler_name="bar",
expire_on=DateTime(dt=Now.dt() + timedelta(seconds=15)),
)
await schedule_program(
client=client,
program_name="blink",
start_stop=DateTime2(
dt1=Now.dt() + timedelta(seconds=10), dt2=Now.dt() + timedelta(seconds=15)
),
)
await assert_job_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=1)
await assert_deferred_action_count(client=client, n=1)
await assert_expiring_action_count(client=client, n=1)
await assert_deferred_program_count(client=client, n=1)
await clear_all_scheduling(client=client)
await assert_job_count(client=client, n=0)
await assert_scheduled_action_count(client=client, n=0)
await assert_deferred_action_count(client=client, n=0)
await assert_expiring_action_count(client=client, n=0)
await assert_deferred_program_count(client=client, n=0)
await get_action(client=client, action_name="foo1")
await get_action(client=client, action_name="foo2")
await get_scheduler(client=client, scheduler_name="bar")
await get_program(client=client, program_name="blink")
@pytest.mark.asyncio
async def test_job_count(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" test job_count """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo1", scheduler_name="bar")
await schedule_action(client=client, action_name="foo2", scheduler_name="bar")
await assert_job_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=2)
@pytest.mark.asyncio
async def test_schedulers_action_count(
startup_and_shutdown_uvicorn, host, port, tmp_path
):
""" tests scheduled action count """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo1", scheduler_name="bar")
await schedule_action(client=client, action_name="foo2", scheduler_name="bar")
await assert_scheduled_action_count(client=client, n=2)
@pytest.mark.asyncio
async def test_replace_dispatcher(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" replace innards of the active dispatcher """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
saved_dir = await get_saved_dir(client=client)
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action1)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo", scheduler_name="bar")
await assert_job_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=1)
# action1 doing its thing
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
# replacement dispatcher
replacement = Dispatcher() # no saved_dir
replacement.add_action("flea", action2)
replacement.add_scheduler("bath", scheduler)
replacement.schedule_action(action_name="flea", scheduler_name="bath")
assert replacement.get_saved_dir() is None
await assert_job_count(client=client, n=1)
# do the business
await replace_dispatcher(client=client, replacement=replacement)
# check the business before checking the behavior of action2
await assert_scheduled_action_count(client=client, n=1)
await assert_job_count(client=client, n=0)
action3 = await get_action(client=client, action_name="flea")
assert action3 is not None
assert action3.file.count("output2") > 0
scheduler2 = await get_scheduler(client=client, scheduler_name="bath")
assert scheduler2 is not None
assert scheduler2.interval == 1
new_saved_dir = await get_saved_dir(client=client)
assert new_saved_dir == saved_dir
dispatcher = await load_dispatcher(client=client)
assert "flea" in dispatcher.get_actions()
assert "bath" in dispatcher.get_schedulers()
assert "bath" in dispatcher.get_scheduled_actions().scheduler_names()
assert "flea" in dispatcher.get_scheduled_actions().actions("bath")
# add the job
await assert_scheduled_action_count(client=client, n=1)
await reschedule_all(client=client)
await assert_job_count(client=client, n=1)
# did action2 do what it was supposed to do?
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action2.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
@pytest.mark.asyncio
async def test_execute_action(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" execute an action at a host/port """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"show": "two"},
)
await add_action(client=client, action_name="foo", action=action)
await execute_action(client, "foo")
lines = None
with open(action.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
@pytest.mark.asyncio
async def test_execute_action_with_rez(
startup_and_shutdown_uvicorn, host, port, tmp_path
):
""" execute an action at a host/port """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = file_x.FileAppend(
relative_to_output_dir=False, file=str(tmp_path / "output.txt")
)
await add_action(client=client, action_name="foo", action=action)
await execute_action_with_rez(
client, "foo", rez=Rez(flds={"payload": {"fleas": "abound"}})
)
lines = None
with open(action.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("fleas" in line for line in lines)
@pytest.mark.asyncio
async def test_execute_supplied_action(
startup_and_shutdown_uvicorn, host, port, tmp_path
):
""" execute a supplied action """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"show": "two"},
)
await client.execute_supplied_action(action)
lines = None
with open(action.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
@pytest.mark.asyncio
async def test_execute_supplied_action_with_rez(
startup_and_shutdown_uvicorn, host, port, tmp_path
):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = file_x.FileAppend(
relative_to_output_dir=False, file=str(tmp_path / "output.txt")
)
rez = Rez(flds={"payload": {"higher": "and higher"}})
await client.execute_supplied_action_with_rez(supplied_action=action, rez=rez)
time.sleep(2)
lines = None
with open(action.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("and higher" in line for line in lines)
@pytest.mark.asyncio
async def test_defer_action(startup_and_shutdown_uvicorn, host, port, tmp_path):
"""
Want to observe the scheduling move from deferred state to ready state.
"""
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await assert_deferred_action_count(client=client, n=0)
await assert_scheduled_action_count(client=client, n=0)
await defer_action(
client=client,
action_name="foo",
scheduler_name="bar",
wait_until=DateTime(dt=Now.dt() + timedelta(seconds=0)),
)
await assert_deferred_action_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=0)
time.sleep(6)
await assert_deferred_action_count(client=client, n=0)
await assert_scheduled_action_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
@pytest.mark.asyncio
async def test_expire_action(startup_and_shutdown_uvicorn, host, port, tmp_path):
"""
Want to observe that an expired action is no longer scheduled
"""
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo", scheduler_name="bar")
time.sleep(0.5)
await assert_scheduled_action_count(client=client, n=1)
await assert_expiring_action_count(client=client, n=0)
await expire_action(
client=client,
action_name="foo",
scheduler_name="bar",
expire_on=DateTime(dt=Now.dt() + timedelta(seconds=1)),
)
await assert_expiring_action_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=1)
time.sleep(6)
await assert_expiring_action_count(client=client, n=0)
await assert_scheduled_action_count(client=client, n=0)
@pytest.mark.asyncio
async def test_immediately(startup_and_shutdown_uvicorn, host, port, tmp_path):
"""
Want to observe the file was written to and that schedulers_actions was not
affected.
"""
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"show": "two"},
)
scheduler = Immediately()
await add_action(client=client, action_name="foo", action=action)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await assert_scheduled_action_count(client=client, n=0)
await defer_action(
client=client,
action_name="foo",
scheduler_name="bar",
wait_until=DateTime(dt=Now.dt() + timedelta(seconds=1)),
)
await assert_scheduled_action_count(client=client, n=0)
time.sleep(6)
await assert_scheduled_action_count(client=client, n=0)
lines = None
with open(action.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
@pytest.mark.asyncio
async def test_program(startup_and_shutdown_uvicorn, host, port, tmp_path):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output3.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
immediately = Immediately()
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_action(client=client, action_name="foo3", action=action3)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await add_scheduler(
client=client, scheduler_name="immediately", scheduler=immediately
)
program = PBEProgram().prologue("foo1").epilogue("foo3").body_element("bar", "foo2")
await add_program(client=client, program_name="baz", program=program)
start = Now().dt()
stop = start + timedelta(seconds=4)
start_stop = DateTime2(dt1=start, dt2=stop)
await schedule_program(client=client, program_name="baz", start_stop=start_stop)
# action1,2,3 doing their things
await run_and_stop_jobs(client=client, pause=6)
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
lines = None
with open(action2.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
lines = None
with open(action3.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
@pytest.mark.asyncio
async def test_unschedule_program(startup_and_shutdown_uvicorn, host, port, tmp_path):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output3.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
immediately = Immediately()
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_action(client=client, action_name="foo3", action=action3)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await add_scheduler(
client=client, scheduler_name="immediately", scheduler=immediately
)
program = PBEProgram().prologue("foo1").epilogue("foo3").body_element("bar", "foo2")
await add_program(client=client, program_name="baz", program=program)
start = Now.dt() + timedelta(seconds=2)
stop = start + timedelta(seconds=2)
start_stop = DateTime2(dt1=start, dt2=stop)
await schedule_program(client=client, program_name="baz", start_stop=start_stop)
time.sleep(1)
await assert_deferred_program_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=0)
await unschedule_program(client=client, program_name="baz")
await assert_deferred_program_count(client=client, n=0)
# action1,2,3 not doing their things
await run_and_stop_jobs(client=client, pause=6)
with pytest.raises(FileNotFoundError):
with open(action1.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action2.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action3.file, "r") as fid:
lines = fid.readlines()
@pytest.mark.asyncio
async def test_delete_program(startup_and_shutdown_uvicorn, host, port, tmp_path):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output3.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
immediately = Immediately()
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_action(client=client, action_name="foo3", action=action3)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await add_scheduler(
client=client, scheduler_name="immediately", scheduler=immediately
)
program = PBEProgram().prologue("foo1").epilogue("foo3").body_element("bar", "foo2")
await add_program(client=client, program_name="baz", program=program)
now = Now().dt()
start = now + timedelta(seconds=2)
stop = start + timedelta(seconds=2)
start_stop = DateTime2(dt1=start, dt2=stop)
await schedule_program(client=client, program_name="baz", start_stop=start_stop)
await assert_deferred_program_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=0)
await delete_program(client=client, program_name="baz")
await assert_deferred_program_count(client=client, n=0)
# action1,2,3 not doing their things
await run_and_stop_jobs(client=client, pause=2)
with pytest.raises(FileNotFoundError):
with open(action1.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action2.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action3.file, "r") as fid:
lines = fid.readlines()
@pytest.mark.asyncio
async def test_success(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" make sure success.execute is a fixed point function """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = Success()
await add_action(client=client, action_name="success", action=action)
rez = Rez(result={"fleas": "unite!"})
await execute_action_with_rez(client=client, action_name="success", rez=rez)
@pytest.mark.asyncio
async def test_file_append_1(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" test basic use of FileAppend """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"free": "pyrambium"},
)
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo", scheduler_name="bar")
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("pyrambium" in line for line in lines)
@pytest.mark.asyncio
async def test_file_append_2(startup_and_shutdown_uvicorn, host, port, tmp_path):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"hi": "pyrambium"},
)
action2 = SysInfo()
action3 = All(actions=[action2, action1])
scheduler = Timely(interval=1)
await add_action(client=client, action_name="foo", action=action3)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await schedule_action(client=client, action_name="foo", scheduler_name="bar")
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("virtual_memory" in line for line in lines)
@pytest.mark.asyncio
async def test_file_append_execute_action(
startup_and_shutdown_uvicorn, tmp_path, host, port
):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
server = Server(host=host, port=port, tags={"role": ["pivot"]})
assert server.port == port
assert server.host == host
await add_server(client=client, server_name="test", server=server)
file_append = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"hi": "pyrambium"},
)
info = sys_x.SysInfo()
execute_action = disp_x.Exec(server_name="test", action_name="file_append")
actions = All(actions=[info, execute_action])
timely = Timely(interval=1)
await add_action(client=client, action_name="file_append", action=file_append)
await add_action(client=client, action_name="actions", action=actions)
await add_scheduler(client=client, scheduler_name="timely", scheduler=timely)
await schedule_action(client=client, action_name="actions", scheduler_name="timely")
await assert_scheduled_action_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(file_append.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("virtual_memory" in line for line in lines)
@pytest.mark.asyncio
async def test_file_append_execute_supplied_action(
startup_and_shutdown_uvicorn, tmp_path, host, port
):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
server = Server(host=host, port=port, tags={"role": ["pivot"]})
assert server.port == port
assert server.host == host
await add_server(client=client, server_name="test", server=server)
file_append = file_x.FileAppend(
# mode = "D",
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"hi": "pyrambium"},
)
info = sys_x.SysInfo()
execute_action = disp_x.ExecSupplied(server_name="test", action=file_append)
actions = All(actions=[info, execute_action])
timely = Timely(interval=1)
await add_action(client=client, action_name="actions", action=actions)
await add_scheduler(client=client, scheduler_name="timely", scheduler=timely)
await schedule_action(client=client, action_name="actions", scheduler_name="timely")
await assert_scheduled_action_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(file_append.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("virtual_memory" in line for line in lines)
@pytest.mark.asyncio
async def test_file_append_execute_action_key_tags_1(
startup_and_shutdown_uvicorn, tmp_path, host, port
):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
server = Server(host=host, port=port, tags={"role": ["pivot"]})
assert server.port == port
assert server.host == host
await add_server(client=client, server_name="test", server=server)
file_append = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"hi": "pyrambium"},
)
info = sys_x.SysInfo()
execute_action = disp_x.ExecKeyTags(
key_tags={"role": ["pivot"]}, action_name="file_append"
)
actions = All(actions=[info, execute_action])
timely = Timely(interval=1)
await add_action(client=client, action_name="file_append", action=file_append)
await add_action(client=client, action_name="actions", action=actions)
await add_scheduler(client=client, scheduler_name="timely", scheduler=timely)
await schedule_action(client=client, action_name="actions", scheduler_name="timely")
await assert_scheduled_action_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(file_append.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("virtual_memory" in line for line in lines)
@pytest.mark.asyncio
async def test_file_append_execute_action_key_tags_2(
startup_and_shutdown_uvicorn, tmp_path, host, port
):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
server = Server(host=host, port=port, tags={"role": ["pivot"]})
assert server.port == port
assert server.host == host
await add_server(client=client, server_name="test", server=server)
file_append = file_x.FileAppend(
relative_to_output_dir=False,
payload={"hi": "pyrambium"},
)
file = str(tmp_path / "output.txt")
vals = Vals(vals={"file": file, "print_header": False})
info = sys_x.SysInfo()
execute_action = disp_x.ExecKeyTags(
key_tags={"role": ["pivot"]}, action_name="file_append"
)
actions = All(actions=[vals, info, execute_action])
timely = Timely(interval=1)
await add_action(client=client, action_name="file_append", action=file_append)
await add_action(client=client, action_name="actions", action=actions)
await add_scheduler(client=client, scheduler_name="timely", scheduler=timely)
await schedule_action(client=client, action_name="actions", scheduler_name="timely")
await assert_scheduled_action_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("virtual_memory" in line for line in lines)
assert not any("---" in line for line in lines) # header line
@pytest.mark.asyncio
async def test_file_append_execute_supplied_action_key_tag(
startup_and_shutdown_uvicorn, tmp_path, host, port
):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
server = Server(host=host, port=port, tags={"role": ["pivot"]})
assert server.port == port
assert server.host == host
await add_server(client=client, server_name="test", server=server)
file_append = file_x.FileAppend(
# mode = "D",
relative_to_output_dir=False,
file=str(tmp_path / "output.txt"),
payload={"hi": "pyrambium"},
)
info = sys_x.SysInfo()
execute_action = disp_x.ExecSuppliedKeyTags(
key_tags={"role": ["pivot"]}, action=file_append
)
actions = All(actions=[info, execute_action])
timely = Timely(interval=1)
await add_action(client=client, action_name="file_append", action=file_append)
await add_action(client=client, action_name="actions", action=actions)
await add_scheduler(client=client, scheduler_name="timely", scheduler=timely)
await schedule_action(client=client, action_name="actions", scheduler_name="timely")
await assert_scheduled_action_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
lines = None
with open(file_append.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
assert any("virtual_memory" in line for line in lines)
@pytest.mark.asyncio
async def test_collections(startup_and_shutdown_uvicorn, host, port, tmp_path):
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
server = Server(host=host, port=port, tags={"role": ["pivot"]})
assert server.port == port
assert server.host == host
await add_server(client=client, server_name="test", server=server)
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output3.txt"),
payload={"show": "two"},
)
scheduler = Timely(interval=1)
immediately = Immediately()
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_action(client=client, action_name="foo3", action=action3)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await add_scheduler(
client=client, scheduler_name="immediately", scheduler=immediately
)
program = PBEProgram().prologue("foo1").epilogue("foo3").body_element("bar", "foo2")
await add_program(client=client, program_name="baz", program=program)
start = Now().dt()
stop = start + timedelta(seconds=4)
start_stop = DateTime2(dt1=start, dt2=stop)
await schedule_program(client=client, program_name="baz", start_stop=start_stop)
# action1,2,3 doing their things
await run_and_stop_jobs(client=client, pause=6)
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
lines = None
with open(action2.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
lines = None
with open(action3.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
actions = await client.get_actions()
schedulers = await client.get_schedulers()
programs = await client.get_programs()
servers = await client.get_servers()
assert len(actions) == 3
assert len(schedulers) == 2
assert len(programs) == 1
assert len(servers) == 1
assert actions["foo1"] == action1
assert actions["foo2"] == action2
assert actions["foo3"] == action3
assert schedulers["bar"] == scheduler
assert schedulers["immediately"] == immediately
assert servers["test"] == server
@pytest.mark.asyncio
async def test_scheduling_info(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" test Scheduling_Info. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output3.txt"),
payload={"show": "two"},
)
action4 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output4.txt"),
payload={"show": "two"},
)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_action(client=client, action_name="foo3", action=action3)
await add_action(client=client, action_name="foo4", action=action4)
scheduler = Timely(interval=1)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await add_scheduler(
client=client, scheduler_name="immediately", scheduler=Immediately()
)
program = PBEProgram().prologue("foo4").epilogue("foo3").body_element("bar", "foo2")
await add_program(client=client, program_name="blink", program=program)
await schedule_action(client=client, action_name="foo1", scheduler_name="bar")
await defer_action(
client=client,
action_name="foo2",
scheduler_name="bar",
wait_until=DateTime(dt=Now.dt() + timedelta(seconds=10)),
)
await expire_action(
client=client,
action_name="foo2",
scheduler_name="bar",
expire_on=DateTime(dt=Now.dt() + timedelta(seconds=15)),
)
await schedule_program(
client=client,
program_name="blink",
start_stop=DateTime2(
dt1=Now.dt() + timedelta(seconds=10), dt2=Now.dt() + timedelta(seconds=15)
),
)
await assert_job_count(client=client, n=1)
await assert_scheduled_action_count(client=client, n=1)
await assert_deferred_action_count(client=client, n=1)
await assert_expiring_action_count(client=client, n=1)
await assert_deferred_program_count(client=client, n=1)
info = disp_x.SchedulingInfo().execute()
assert info.result and isinstance(info.result, dict)
scheduling_info = info.result
assert 1 == len(scheduling_info["scheduled_actions"].scheduler_actions)
assert 1 == len(
scheduling_info["deferred_scheduled_actions"].dated_scheduled_actions
)
assert 1 == len(
scheduling_info["expiring_scheduled_actions"].dated_scheduled_actions
)
assert 1 == len(scheduling_info["deferred_programs"].deferred_programs)
await clear_all_scheduling(client=client)
info = disp_x.SchedulingInfo().execute()
assert info.result and isinstance(info.result, dict)
scheduling_info = info.result
assert 0 == len(scheduling_info["scheduled_actions"].scheduler_actions)
assert 0 == len(
scheduling_info["deferred_scheduled_actions"].dated_scheduled_actions
)
assert 0 == len(
scheduling_info["expiring_scheduled_actions"].dated_scheduled_actions
)
assert 0 == len(scheduling_info["deferred_programs"].deferred_programs)
@pytest.mark.asyncio
async def test_unschedule_active_program(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" test unscheduling the active elements of a program. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output3.txt"),
payload={"show": "two"},
)
action4 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output4.txt"),
payload={"show": "two"},
)
action5 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output5.txt"),
payload={"show": "two"},
)
action6 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output6.txt"),
payload={"show": "two"},
)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_action(client=client, action_name="foo3", action=action3)
await add_action(client=client, action_name="foo4", action=action4)
await add_action(client=client, action_name="foo5", action=action5)
await add_action(client=client, action_name="foo6", action=action6)
scheduler = Timely(interval=0.5)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await add_scheduler(
client=client, scheduler_name="immediately", scheduler=Immediately()
)
program1 = PBEProgram().prologue("foo1").epilogue("foo3").body_element("bar", "foo2")
await add_program(client=client, program_name="program1", program=program1)
program2 = PBEProgram().prologue("foo4").epilogue("foo6").body_element("bar", "foo5")
await add_program(client=client, program_name="program2", program=program2)
await schedule_program(
client=client,
program_name="program1",
start_stop=DateTime2(
dt1=Now.dt(), dt2=Now.dt() + timedelta(seconds=5)
),
)
await schedule_program(
client=client,
program_name="program2",
start_stop=DateTime2(
dt1=Now.dt() + timedelta(seconds=10), dt2=Now.dt() + timedelta(seconds=20)
),
)
time.sleep(1)
# action2, (action1 has probably finished)
await assert_scheduled_action_count(client=client, n=1)
# action3 deferred
await assert_deferred_action_count(client=client, n=1)
# action2 expiring
await assert_expiring_action_count(client=client, n=1)
# program2 waiting around
await assert_deferred_program_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
await unschedule_active_program(client=client, program_name="program1")
# action1,2,3 doing their things or not
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
lines = None
with open(action2.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
with pytest.raises(FileNotFoundError):
with open(action3.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action4.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action5.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action6.file, "r") as fid:
lines = fid.readlines()
info = disp_x.SchedulingInfo().execute()
assert info.result and isinstance(info.result, dict)
scheduling_info = info.result
assert 0 == len(scheduling_info["scheduled_actions"].scheduler_actions)
assert 0 == len(
scheduling_info["deferred_scheduled_actions"].dated_scheduled_actions
)
assert 0 == len(
scheduling_info["expiring_scheduled_actions"].dated_scheduled_actions
)
# program2 waiting around
assert 1 == len(scheduling_info["deferred_programs"].deferred_programs)
await clear_all_scheduling(client=client)
@pytest.mark.asyncio
async def test_unschedule_active_program_2(startup_and_shutdown_uvicorn, host, port, tmp_path):
""" test unscheduling the active elements of a program. """
client = ClientAsync(host=host, port=port)
await reset_dispatcher(client, str(tmp_path))
action1 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output1.txt"),
payload={"show": "two"},
)
action2 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output2.txt"),
payload={"show": "two"},
)
action3 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output3.txt"),
payload={"show": "two"},
)
action4 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output4.txt"),
payload={"show": "two"},
)
action5 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output5.txt"),
payload={"show": "two"},
)
action6 = file_x.FileAppend(
relative_to_output_dir=False,
file=str(tmp_path / "output6.txt"),
payload={"show": "two"},
)
await add_action(client=client, action_name="foo1", action=action1)
await add_action(client=client, action_name="foo2", action=action2)
await add_action(client=client, action_name="foo3", action=action3)
await add_action(client=client, action_name="foo4", action=action4)
await add_action(client=client, action_name="foo5", action=action5)
await add_action(client=client, action_name="foo6", action=action6)
scheduler = Timely(interval=0.5)
await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
await add_scheduler(
client=client, scheduler_name="immediately", scheduler=Immediately()
)
program1 = PBEProgram().prologue("foo1").epilogue("foo3").body_element("bar", "foo2")
await add_program(client=client, program_name="program1", program=program1)
program2 = PBEProgram().prologue("foo4").epilogue("foo6").body_element("bar", "foo5")
await add_program(client=client, program_name="program2", program=program2)
unschedule_active = disp_x.UnscheduleActiveProgram(program_name="program1")
await add_action(client=client, action_name="unscheduleActiveProgram", action=unschedule_active)
await schedule_program(
client=client,
program_name="program1",
start_stop=DateTime2(
dt1=Now.dt(), dt2=Now.dt() + timedelta(seconds=5)
),
)
await schedule_program(
client=client,
program_name="program2",
start_stop=DateTime2(
dt1=Now.dt() + timedelta(seconds=10), dt2=Now.dt() + timedelta(seconds=20)
),
)
time.sleep(1)
# action2, (action1 has probably finished)
await assert_scheduled_action_count(client=client, n=1)
# action3 deferred
await assert_deferred_action_count(client=client, n=1)
# action2 expiring
await assert_expiring_action_count(client=client, n=1)
# program2 waiting around
await assert_deferred_program_count(client=client, n=1)
await run_and_stop_jobs(client=client, pause=2)
await execute_action(client=client, action_name="unscheduleActiveProgram")
# action1,2,3 doing their things or not
lines = None
with open(action1.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
lines = None
with open(action2.file, "r") as fid:
lines = fid.readlines()
assert lines is not None and isinstance(lines, list) and len(lines) >= 1
with pytest.raises(FileNotFoundError):
with open(action3.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action4.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action5.file, "r") as fid:
lines = fid.readlines()
with pytest.raises(FileNotFoundError):
with open(action6.file, "r") as fid:
lines = fid.readlines()
info = disp_x.SchedulingInfo().execute()
assert info.result and isinstance(info.result, dict)
scheduling_info = info.result
assert 0 == len(scheduling_info["scheduled_actions"].scheduler_actions)
assert 0 == len(
scheduling_info["deferred_scheduled_actions"].dated_scheduled_actions
)
assert 0 == len(
scheduling_info["expiring_scheduled_actions"].dated_scheduled_actions
)
# program2 waiting around
assert 1 == len(scheduling_info["deferred_programs"].deferred_programs)
await clear_all_scheduling(client=client)
# ==========================================
# helpers
async def get_program(client: ClientAsync, program_name: str):
""" add a program and confirm """
program = await client.get_program(program_name=program_name)
assert isinstance(program, Program), str(type(program))
return program
async def add_program(client: ClientAsync, program_name: str, program: Program):
""" add a program and confirm """
response = await client.add_program(program_name=program_name, program=program)
assert (
response.status_code == 200
), f"failed to add program ({program_name}) detail ({response.json()})"
retrieved_program = await client.get_program(program_name=program_name)
assert isinstance(retrieved_program, Program), str(type(retrieved_program))
async def set_program(client: ClientAsync, program_name: str, program: Program):
""" add a program and confirm """
response = await client.set_program(program_name=program_name, program=program)
assert response.status_code == 200, f"failed to set program ({program_name})"
retrieved_program = await client.get_program(program_name=program_name)
assert isinstance(retrieved_program, Program), str(type(retrieved_program))
async def delete_program(client: ClientAsync, program_name: str):
""" delete a program """
response = await client.delete_program(
program_name=program_name,
)
assert response.status_code == 200, f"failed to delete program ({program_name})"
async def schedule_program(
client: ClientAsync, program_name: str, start_stop: DateTime2
):
""" schedule a program """
response = await client.schedule_program(
program_name=program_name, start_stop=start_stop
)
assert response.status_code == 200, f"failed to schedule program ({program_name})"
async def unschedule_program(client: ClientAsync, program_name: str):
""" unschedule a program """
response = await client.unschedule_program(
program_name=program_name,
)
assert response.status_code == 200, f"failed to unschedule program ({program_name})"
async def unschedule_active_program(client: ClientAsync, program_name: str):
""" schedule the active elements of a program """
response = await client.unschedule_active_program(
program_name=program_name,
)
assert response.status_code == 200, f"failed to unschedule active elements of program ({program_name})"
async def add_server(client: ClientAsync, server_name: str, server: Server):
""" add a server and confirm """
response = await client.add_server(server_name=server_name, server=server)
assert response.status_code == 200, f"failed to add server ({server_name})"
retrieved = await client.get_server(server_name=server_name)
assert isinstance(retrieved, Server), str(type(retrieved))
async def set_server(client: ClientAsync, server_name: str, server: Server):
""" set a server and confirm """
response = await client.set_server(server_name=server_name, server=server)
assert response.status_code == 200, f"failed to set server ({server_name})"
retrieved = await client.get_server(server_name=server_name)
assert isinstance(retrieved, Server), str(type(retrieved))
async def delete_server(client: ClientAsync, server_name: str):
""" delete a server """
response = await client.delete_server(server_name=server_name)
assert response.status_code == 200, f"failed to delete server ({server_name})"
async def get_action(client: ClientAsync, action_name: str):
""" make sure action exists and resolves properly """
retrieved_action = await client.get_action(action_name=action_name)
assert isinstance(retrieved_action, Action), str(type(retrieved_action))
return retrieved_action
async def add_action(client: ClientAsync, action_name: str, action: Action):
""" add an action and confirm """
response = await client.add_action(action_name=action_name, action=action)
assert response.status_code == 200, f"failed to put action ({action_name})"
retrieved_action = await client.get_action(action_name=action_name)
assert isinstance(retrieved_action, Action), str(type(retrieved_action))
async def set_action(client: ClientAsync, action_name: str, action: Action):
""" set an action and confirm """
response = await client.set_action(action_name=action_name, action=action)
assert response.status_code == 200, f"failed to put action ({action_name})"
retrieved_action = await client.get_action(action_name=action_name)
assert isinstance(retrieved_action, Action), str(type(retrieved_action))
async def execute_action(client: ClientAsync, action_name: str):
    """ execute an action """
    response = await client.execute_action(action_name=action_name)
assert response.status_code == 200, f"failed to execute action ({action_name})"
async def execute_action_with_rez(client: ClientAsync, action_name: str, rez: Rez):
    """ execute an action with a rez argument and return the result """
    response = await client.execute_action_with_rez(action_name=action_name, rez=rez)
assert (
response.status_code == 200
), f"failed to execute action ({action_name}) with rez ({rez})"
return response.json()
async def get_scheduler(client: ClientAsync, scheduler_name: str):
""" make sure scheduler exists and resolves properly """
retrieved_scheduler = await client.get_scheduler(scheduler_name=scheduler_name)
assert isinstance(retrieved_scheduler, Scheduler), str(type(retrieved_scheduler))
return retrieved_scheduler
async def add_scheduler(client: ClientAsync, scheduler_name: str, scheduler: Scheduler):
""" add a scheduler and confirm """
response = await client.add_scheduler(
scheduler_name=scheduler_name, scheduler=scheduler
)
assert response.status_code == 200, f"failed to put scheduler ({scheduler_name})"
retrieved_scheduler = await client.get_scheduler(scheduler_name=scheduler_name)
assert isinstance(retrieved_scheduler, Scheduler), str(type(retrieved_scheduler))
async def set_scheduler(client: ClientAsync, scheduler_name: str, scheduler: Scheduler):
""" add a scheduler and confirm """
response = await client.set_scheduler(
scheduler_name=scheduler_name, scheduler=scheduler
)
assert response.status_code == 200, f"failed to put scheduler ({scheduler_name})"
retrieved_scheduler = await client.get_scheduler(scheduler_name=scheduler_name)
assert isinstance(retrieved_scheduler, Scheduler), str(type(retrieved_scheduler))
async def unschedule_scheduler(client: ClientAsync, scheduler_name: str):
    """ unschedule a scheduler """
    response = await client.unschedule_scheduler(scheduler_name=scheduler_name)
assert (
response.status_code == 200
), f"failed to unschedule scheduler ({scheduler_name})"
async def schedule_action(client: ClientAsync, scheduler_name: str, action_name: str):
""" schedule an action """
response = await client.schedule_action(
scheduler_name=scheduler_name, action_name=action_name
)
assert (
response.status_code == 200
), f"failed to schedule action ({action_name}) using scheduler ({scheduler_name})"
async def defer_action(
client: ClientAsync, scheduler_name: str, action_name: str, wait_until: DateTime
):
""" defer an action """
response = await client.defer_action(
scheduler_name=scheduler_name, action_name=action_name, wait_until=wait_until
)
assert (
response.status_code == 200
), f"failed to defer action ({action_name}) using scheduler ({scheduler_name})"
async def expire_action(
client: ClientAsync, scheduler_name: str, action_name: str, expire_on: DateTime
):
""" expire an action """
response = await client.expire_action(
scheduler_name=scheduler_name, action_name=action_name, expire_on=expire_on
)
assert (
response.status_code == 200
), f"failed to expire action ({action_name}) using scheduler ({scheduler_name})"
async def load_dispatcher(client: ClientAsync):
""" return the saved dispatcher """
retrieved_dispatcher = await client.load_dispatcher()
assert isinstance(retrieved_dispatcher, Dispatcher), str(type(retrieved_dispatcher))
return retrieved_dispatcher
async def get_saved_dir(client: ClientAsync):
""" return saved_dir """
retrieved_file_path = await client.get_saved_dir()
assert isinstance(retrieved_file_path, FilePathe), str(type(retrieved_file_path))
return retrieved_file_path
async def replace_dispatcher(client: ClientAsync, replacement: Dispatcher):
"""
replace a dispatcher
"""
response = await client.replace_dispatcher(replacement)
assert (
response.status_code == 200
), f"failed to replace the dispatcher ({response.json()}"
async def unschedule_all(client: ClientAsync):
"""
unschedule all schedulers and actions
"""
response = await client.unschedule_all_schedulers()
assert response.status_code == 200, "failed to unschedule all schedulers"
async def clear_all_scheduling(client: ClientAsync):
"""
    clear all scheduling: schedulers, actions, and programs
"""
response = await client.clear_all_scheduling()
assert response.status_code == 200, "failed to clear all scheduling"
async def reschedule_all(client: ClientAsync):
"""
reschedule all schedulers and actions
"""
response = await client.reschedule_all_schedulers()
assert response.status_code == 200, "failed to reschedule all schedulers"
async def reset_dispatcher(client: ClientAsync, tmp_dir: str):
"""
usage: reset_dispatcher(client, str(tmp_path))
"""
# set saved_dir to fixture's tmp_path (see usage)
saved_dir = FilePathe(path=tmp_dir)
response = await client.set_saved_dir(saved_dir=saved_dir)
assert response.status_code == 200, "failed to set saved_dir"
saved_saved_dir = await client.get_saved_dir()
assert saved_saved_dir == saved_dir
# empty the dispatcher and stop the jobs if they're running
    # when uvicorn tests run as a batch, these objects need to be reset,
    # since the app instance is evidently shared across the tests
response = await client.clear_dispatcher()
assert response.status_code == 200, "failed to clear dispatcher"
response = await client.stop_jobs() # likely already stopped
async def assert_job_count(client: ClientAsync, n: int):
response = await client.job_count()
assert response.status_code == 200, "failed to retrieve job count"
job_count = response.json()["job_count"]
assert job_count == n, f"expected a job count of ({n})"
async def assert_scheduled_action_count(client: ClientAsync, n: int):
response = await client.scheduled_action_count()
    assert response.status_code == 200, "failed to retrieve scheduled action count"
action_count = response.json()["action_count"]
assert action_count == n, f"expected an action count of ({n})"
async def assert_deferred_action_count(client: ClientAsync, n: int):
response = await client.deferred_action_count()
assert response.status_code == 200, "failed to retrieve deferred action count"
deferred_action_count = response.json()["deferred_action_count"]
assert deferred_action_count == n, f"expected a deferred action count of ({n})"
async def assert_expiring_action_count(client: ClientAsync, n: int):
response = await client.expiring_action_count()
assert response.status_code == 200, "failed to retrieve expiring action count"
expiring_action_count = response.json()["expiring_action_count"]
    assert expiring_action_count == n, f"expected an expiring action count of ({n})"
async def assert_deferred_program_count(client: ClientAsync, n: int):
response = await client.deferred_program_count()
assert response.status_code == 200, "failed to retrieve deferred program count"
deferred_program_count = response.json()["deferred_program_count"]
assert deferred_program_count == n, f"expected a deferred program count of ({n})"
async def run_and_stop_jobs(client: ClientAsync, pause: int):
    """ run jobs, sleep for `pause` seconds, then stop the jobs """
    response = await client.run_jobs()
assert response.status_code == 200, "failed to start jobs"
time.sleep(pause)
response = await client.stop_jobs()
assert response.status_code == 200, "failed to stop jobs"
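# ==========================================
# a minimal sketch (not part of the original suite) showing how the helpers
# above compose into a test; it assumes the caller supplies a connected
# ClientAsync plus pre-built Action and Scheduler instances, and the names
# "foo" and "bar" are purely illustrative
async def example_schedule_and_run(
    client: ClientAsync, action: Action, scheduler: Scheduler, tmp_dir: str
):
    await reset_dispatcher(client, tmp_dir)
    await add_action(client=client, action_name="foo", action=action)
    await add_scheduler(client=client, scheduler_name="bar", scheduler=scheduler)
    await schedule_action(client=client, scheduler_name="bar", action_name="foo")
    await assert_scheduled_action_count(client=client, n=1)
    # let the jobs tick briefly, then stop them and clean up
    await run_and_stop_jobs(client=client, pause=2)
    await clear_all_scheduling(client=client)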
| 37.7913 | 107 | 0.703099 | 8,968 | 68,629 | 5.174509 | 0.035571 | 0.07163 | 0.04034 | 0.049305 | 0.885314 | 0.863635 | 0.838767 | 0.826204 | 0.805323 | 0.77705 | 0 | 0.012245 | 0.182503 | 68,629 | 1,815 | 108 | 37.812121 | 0.81488 | 0.017791 | 0 | 0.729345 | 0 | 0 | 0.0709 | 0.004869 | 0 | 0 | 0 | 0 | 0.157407 | 1 | 0 | false | 0 | 0.01567 | 0 | 0.019943 | 0.000712 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
95554e190258085ca6f94d20ab54172208309eed | 86,335 | py | Python | reinterpreted-python/tests/test_syntax.py | diana-hep/gawk | 8528e70f9973b6216b516680141ca443bcf7448b | ["BSD-3-Clause"] | null | null | null |
reinterpreted-python/tests/test_syntax.py | diana-hep/gawk | 8528e70f9973b6216b516680141ca443bcf7448b | ["BSD-3-Clause"] | null | null | null |
reinterpreted-python/tests/test_syntax.py | diana-hep/gawk | 8528e70f9973b6216b516680141ca443bcf7448b | ["BSD-3-Clause"] | null | null | null |
import rejig.pybytecode
from rejig.syntaxtree import *
def check(what_is, what_should_be):
    """ compile `what_is` inside a function, lift the bytecode to a rejig AST,
    and assert that it equals `what_should_be` """
    env = {}
    # statements (multi-line bodies, assignments, defs, prints) are compiled
    # as-is; a bare expression is wrapped in `return` so it survives compilation
    if "\n" in what_is or " = " in what_is or "def " in what_is or "print(" in what_is:
        exec("def f():\n " + "\n ".join(what_is.split("\n")), env)
    else:
        exec("def f():\n return " + what_is, env)
    ast = rejig.pybytecode.ast(env["f"])
    print(str(ast))
    assert ast == what_should_be, "\nshould be: " + repr(what_should_be) + "\nyet it is: " + repr(ast)
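# conventions visible in the expected ASTs below: a function body becomes a
# Suite of expressions; a bare expression compiles to Call('return', <expr>);
# falling off the end of a function appears as Call('return', Const(None));
# list and tuple displays become Call('list', ...) / Call('tuple', ...);
# attribute access is Call('.', obj, 'name') and subscripting is
# Call('[.]', obj, *indices)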
check('"hello"', Suite((Call('return', Const('hello')),)))
check('''.3''', Suite((Call('return', Const(.3)),)))
check('''-3''', Suite((Call('return', Const(-3)),)))
check('''--3''', Suite((Call('return', Const(--3)),)))
check('''+3''', Suite((Call('return', Const(+3)),)))
check('''++3''', Suite((Call('return', Const(++3)),)))
check('''+-3''', Suite((Call('return', Const(+-3)),)))
check('''3e1''', Suite((Call('return', Const(3e1)),)))
check('''-3e1''', Suite((Call('return', Const(-3e1)),)))
check('''+3e1''', Suite((Call('return', Const(+3e1)),)))
check('0x123', Suite((Call('return', Const(0x123)),)))
check('0o123', Suite((Call('return', Const(0o123)),)))
check('3+4j', Suite((Call('return', Const(3+4j)),)))
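# CPython's compiler folds signed and complex literals, so expressions like
# --3 and 3+4j land in the bytecode as single constants; the Const(...)
# arguments above are written with the same prefixes and evaluate to the
# already-folded values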
check('''[]''', Suite((Call('return', Call('list')),)))
check('''[3]''', Suite((Call('return', Call('list', Const(3))),)))
check('''[3,]''', Suite((Call('return', Call('list', Const(3))),)))
check('''[3, 4]''', Suite((Call('return', Call('list', Const(3), Const(4))),)))
check('''[3, 4,]''', Suite((Call('return', Call('list', Const(3), Const(4))),)))
check('''[3, 4, 5]''', Suite((Call('return', Call('list', Const(3), Const(4), Const(5))),)))
check('''[3, 4, 5,]''', Suite((Call('return', Call('list', Const(3), Const(4), Const(5))),)))
check('''[3, 4, 5, 6]''', Suite((Call('return', Call('list', Const(3), Const(4), Const(5), Const(6))),)))
check('''[3, 4, 5, 6,]''', Suite((Call('return', Call('list', Const(3), Const(4), Const(5), Const(6))),)))
check('''[[1], 2, 3, 4, 5]''', Suite((Call('return', Call('list', Call('list', Const(1)), Const(2), Const(3), Const(4), Const(5))),)))
check('''[[1, 2], 3, 4, 5]''', Suite((Call('return', Call('list', Call('list', Const(1), Const(2)), Const(3), Const(4), Const(5))),)))
check('''[[1, 2, 3], 4, 5]''', Suite((Call('return', Call('list', Call('list', Const(1), Const(2), Const(3)), Const(4), Const(5))),)))
check('''[[1, 2, 3, 4], 5]''', Suite((Call('return', Call('list', Call('list', Const(1), Const(2), Const(3), Const(4)), Const(5))),)))
check('''[[1, 2, 3, 4, 5]]''', Suite((Call('return', Call('list', Call('list', Const(1), Const(2), Const(3), Const(4), Const(5)))),)))
check('''[[[1], 2, 3, 4, 5]]''', Suite((Call('return', Call('list', Call('list', Call('list', Const(1)), Const(2), Const(3), Const(4), Const(5)))),)))
check('''[[[1, 2], 3, 4, 5]]''', Suite((Call('return', Call('list', Call('list', Call('list', Const(1), Const(2)), Const(3), Const(4), Const(5)))),)))
check('''[[[1, 2, 3], 4, 5]]''', Suite((Call('return', Call('list', Call('list', Call('list', Const(1), Const(2), Const(3)), Const(4), Const(5)))),)))
check('''[[[1, 2, 3, 4], 5]]''', Suite((Call('return', Call('list', Call('list', Call('list', Const(1), Const(2), Const(3), Const(4)), Const(5)))),)))
check('''[[[1, 2, 3, 4, 5]]]''', Suite((Call('return', Call('list', Call('list', Call('list', Const(1), Const(2), Const(3), Const(4), Const(5))))),)))
check('''[1, 2, 3, 4, [5]]''', Suite((Call('return', Call('list', Const(1), Const(2), Const(3), Const(4), Call('list', Const(5)))),)))
check('''[1, 2, 3, [4, 5]]''', Suite((Call('return', Call('list', Const(1), Const(2), Const(3), Call('list', Const(4), Const(5)))),)))
check('''[1, 2, [3, 4, 5]]''', Suite((Call('return', Call('list', Const(1), Const(2), Call('list', Const(3), Const(4), Const(5)))),)))
check('''[1, [2, 3, 4, 5]]''', Suite((Call('return', Call('list', Const(1), Call('list', Const(2), Const(3), Const(4), Const(5)))),)))
check('''[[1, 2, 3, 4, [5]]]''', Suite((Call('return', Call('list', Call('list', Const(1), Const(2), Const(3), Const(4), Call('list', Const(5))))),)))
check('''[[1, 2, 3, [4, 5]]]''', Suite((Call('return', Call('list', Call('list', Const(1), Const(2), Const(3), Call('list', Const(4), Const(5))))),)))
check('''[[1, 2, [3, 4, 5]]]''', Suite((Call('return', Call('list', Call('list', Const(1), Const(2), Call('list', Const(3), Const(4), Const(5))))),)))
check('''[[1, [2, 3, 4, 5]]]''', Suite((Call('return', Call('list', Call('list', Const(1), Call('list', Const(2), Const(3), Const(4), Const(5))))),)))
check('''x = (None)''', Suite((Assign((Name('x'),), Const(None)), Call('return', Const(None)),)))
check('''x = (3, None)''', Suite((Assign((Name('x'),), Call('tuple', Const(3), Const(None))), Call('return', Const(None)),)))
check('''x = (3, 4, None)''', Suite((Assign((Name('x'),), Call('tuple', Const(3), Const(4), Const(None))), Call('return', Const(None)),)))
check('''x = (3, 4, 5, None)''', Suite((Assign((Name('x'),), Call('tuple', Const(3), Const(4), Const(5), Const(None))), Call('return', Const(None)),)))
check('''x = (3, 4, 5, 6, None)''', Suite((Assign((Name('x'),), Call('tuple', Const(3), Const(4), Const(5), Const(6), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, None), 2, 3, 4, 5, None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Const(None)), Const(2), Const(3), Const(4), Const(5), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, 2, None), 3, 4, 5, None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Const(2), Const(None)), Const(3), Const(4), Const(5), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, 2, 3, None), 4, 5, None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Const(2), Const(3), Const(None)), Const(4), Const(5), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, 2, 3, 4, None), 5, None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Const(2), Const(3), Const(4), Const(None)), Const(5), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, 2, 3, 4, 5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Const(2), Const(3), Const(4), Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (((1, None), 2, 3, 4, 5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Call('tuple', Const(1), Const(None)), Const(2), Const(3), Const(4), Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (((1, 2, None), 3, 4, 5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Call('tuple', Const(1), Const(2), Const(None)), Const(3), Const(4), Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (((1, 2, 3, None), 4, 5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Call('tuple', Const(1), Const(2), Const(3), Const(None)), Const(4), Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (((1, 2, 3, 4, None), 5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Call('tuple', Const(1), Const(2), Const(3), Const(4), Const(None)), Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (((1, 2, 3, 4, 5, None), None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Call('tuple', Const(1), Const(2), Const(3), Const(4), Const(5), Const(None)), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (1, 2, 3, 4, (5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Const(1), Const(2), Const(3), Const(4), Call('tuple', Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (1, 2, 3, (4, 5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Const(1), Const(2), Const(3), Call('tuple', Const(4), Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (1, 2, (3, 4, 5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Const(1), Const(2), Call('tuple', Const(3), Const(4), Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = (1, (2, 3, 4, 5, None), None)''', Suite((Assign((Name('x'),), Call('tuple', Const(1), Call('tuple', Const(2), Const(3), Const(4), Const(5), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, 2, 3, 4, (5, None), None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Const(2), Const(3), Const(4), Call('tuple', Const(5), Const(None)), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, 2, 3, (4, 5, None), None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Const(2), Const(3), Call('tuple', Const(4), Const(5), Const(None)), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, 2, (3, 4, 5, None), None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Const(2), Call('tuple', Const(3), Const(4), Const(5), Const(None)), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''x = ((1, (2, 3, 4, 5, None), None), None)''', Suite((Assign((Name('x'),), Call('tuple', Call('tuple', Const(1), Call('tuple', Const(2), Const(3), Const(4), Const(5), Const(None)), Const(None)), Const(None))), Call('return', Const(None)),)))
check('''3
''', Suite((Call('return', Const(None)),))) # hey look: Python does dead code removal!
check('''3
''', Suite((Call('return', Const(None)),)))
check('''3
''', Suite((Call('return', Const(None)),)))
check('''3
''', Suite((Call('return', Const(None)),)))
check('''
3''', Suite((Call('return', Const(None)),)))
check('''
3''', Suite((Call('return', Const(None)),)))
check('''
3''', Suite((Call('return', Const(None)),)))
check('''
3''', Suite((Call('return', Const(None)),)))
check('''a''', Suite((Call('return', Name('a')),)))
check('''a.b''', Suite((Call('return', Call('.', Name('a'), 'b')),)))
check('''a.b.c''', Suite((Call('return', Call('.', Call('.', Name('a'), 'b'), 'c')),)))
check('''a.b.c.d''', Suite((Call('return', Call('.', Call('.', Call('.', Name('a'), 'b'), 'c'), 'd')),)))
check('''a.b.c.d.e''', Suite((Call('return', Call('.', Call('.', Call('.', Call('.', Name('a'), 'b'), 'c'), 'd'), 'e')),)))
check('''a[1]''', Suite((Call('return', Call('[.]', Name('a'), Const(1))),)))
check('''a[1][2]''', Suite((Call('return', Call('[.]', Call('[.]', Name('a'), Const(1)), Const(2))),)))
check('''a[1][2][3]''', Suite((Call('return', Call('[.]', Call('[.]', Call('[.]', Name('a'), Const(1)), Const(2)), Const(3))),)))
check('''a[1][2][3][4]''', Suite((Call('return', Call('[.]', Call('[.]', Call('[.]', Call('[.]', Name('a'), Const(1)), Const(2)), Const(3)), Const(4))),)))
check('''(9, None).stuff''', Suite((Call('return', Call('.', Call('tuple', Const(9), Const(None)), 'stuff')),)))
check('''((9, None), None).stuff''', Suite((Call('return', Call('.', Call('tuple', Call('tuple', Const(9), Const(None)), Const(None)), 'stuff')),)))
check('''(((9, None), None), None).stuff''', Suite((Call('return', Call('.', Call('tuple', Call('tuple', Call('tuple', Const(9), Const(None)), Const(None)), Const(None)), 'stuff')),)))
check('''a[1]''', Suite((Call('return', Call('[.]', Name('a'), Const(1))),)))
check('''a["hey"]''', Suite((Call('return', Call('[.]', Name('a'), Const('hey'))),)))
check('''a[1:2]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)))),)))
check('''a[:]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)))),)))
check('''a[1:]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)))),)))
check('''a[:1]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)))),)))
check('''a[::]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)))),)))
check('''a[1::]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)))),)))
check('''a[:1:]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)))),)))
check('''a[::1]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(1)))),)))
check('''a[1:2:]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)))),)))
check('''a[:1:2]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(2)))),)))
check('''a[1::2]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(2)))),)))
check('''a[1:2:3]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(3)))),)))
check('''a[1,]''', Suite((Call('return', Call('[.]', Name('a'), Const(1))),)))
check('''a["hey",]''', Suite((Call('return', Call('[.]', Name('a'), Const('hey'))),)))
check('''a[1:2,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)))),)))
check('''a[:,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)))),)))
check('''a[1:,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)))),)))
check('''a[:1,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)))),)))
check('''a[::,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)))),)))
check('''a[1::,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)))),)))
check('''a[:1:,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)))),)))
check('''a[::1,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(1)))),)))
check('''a[1:2:,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)))),)))
check('''a[:1:2,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(2)))),)))
check('''a[1::2,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(2)))),)))
check('''a[1:2:3,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(3)))),)))
check('''a[1,5]''', Suite((Call('return', Call('[.]', Name('a'), Const(1), Const(5))),)))
check('''a["hey",5]''', Suite((Call('return', Call('[.]', Name('a'), Const('hey'), Const(5))),)))
check('''a[1:2,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)), Const(5))),)))
check('''a[:,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)), Const(5))),)))
check('''a[1:,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)), Const(5))),)))
check('''a[:1,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)), Const(5))),)))
check('''a[::,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)), Const(5))),)))
check('''a[1::,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)), Const(5))),)))
check('''a[:1:,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)), Const(5))),)))
check('''a[::1,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(1)), Const(5))),)))
check('''a[1:2:,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)), Const(5))),)))
check('''a[:1:2,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(2)), Const(5))),)))
check('''a[1::2,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(2)), Const(5))),)))
check('''a[1:2:3,5]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(3)), Const(5))),)))
check('''a[1,5,]''', Suite((Call('return', Call('[.]', Name('a'), Const(1), Const(5))),)))
check('''a["hey",5,]''', Suite((Call('return', Call('[.]', Name('a'), Const('hey'), Const(5))),)))
check('''a[1:2,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)), Const(5))),)))
check('''a[:,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)), Const(5))),)))
check('''a[1:,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)), Const(5))),)))
check('''a[:1,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)), Const(5))),)))
check('''a[::,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)), Const(5))),)))
check('''a[1::,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)), Const(5))),)))
check('''a[:1:,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)), Const(5))),)))
check('''a[::1,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(1)), Const(5))),)))
check('''a[1:2:,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)), Const(5))),)))
check('''a[:1:2,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(2)), Const(5))),)))
check('''a[1::2,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(2)), Const(5))),)))
check('''a[1:2:3,5,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(3)), Const(5))),)))
check('''a[1,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Const(1), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a["hey","a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Const('hey'), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1:2,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(2), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[:,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(None), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1:,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(None), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[:1,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(1), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[::,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(None), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1::,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(None), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[:1:,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(1), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[::1,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(None), Const(1)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1:2:,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(2), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[:1:2,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(1), Const(2)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1::2,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(None), Const(2)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1:2:3,"a":"b"]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(2), Const(3)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Const(1), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a["hey","a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Const('hey'), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1:2,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(2), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[:,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(None), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1:,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(None), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[:1,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(1), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[::,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(None), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1::,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(None), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[:1:,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(1), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[::1,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(None), Const(1)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1:2:,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(2), Const(None)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[:1:2,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(None), Const(1), Const(2)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1::2,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(None), Const(2)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1:2:3,"a":"b",]''', Suite((Call('return', Call('[.]', Name('a'), Call("slice", Const(1), Const(2), Const(3)), Call('slice', Const('a'), Const('b'), Const(None)))),)))
check('''a[1,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Const(1), Const(5), Const(6))),)))
check('''a["hey",5,6]''', Suite((Call('return', Call('[.]', Name('a'), Const('hey'), Const(5), Const(6))),)))
check('''a[1:2,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)), Const(5), Const(6))),)))
check('''a[:,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)), Const(5), Const(6))),)))
check('''a[1:,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)), Const(5), Const(6))),)))
check('''a[:1,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)), Const(5), Const(6))),)))
check('''a[::,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)), Const(5), Const(6))),)))
check('''a[1::,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)), Const(5), Const(6))),)))
check('''a[:1:,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)), Const(5), Const(6))),)))
check('''a[::1,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(1)), Const(5), Const(6))),)))
check('''a[1:2:,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)), Const(5), Const(6))),)))
check('''a[:1:2,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(2)), Const(5), Const(6))),)))
check('''a[1::2,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(2)), Const(5), Const(6))),)))
check('''a[1:2:3,5,6]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(3)), Const(5), Const(6))),)))
check('''a[1,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Const(1), Const(5), Const(6))),)))
check('''a["hey",5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Const('hey'), Const(5), Const(6))),)))
check('''a[1:2,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)), Const(5), Const(6))),)))
check('''a[:,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)), Const(5), Const(6))),)))
check('''a[1:,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)), Const(5), Const(6))),)))
check('''a[:1,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)), Const(5), Const(6))),)))
check('''a[::,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(None)), Const(5), Const(6))),)))
check('''a[1::,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(None)), Const(5), Const(6))),)))
check('''a[:1:,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(None)), Const(5), Const(6))),)))
check('''a[::1,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(None), Const(1)), Const(5), Const(6))),)))
check('''a[1:2:,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(None)), Const(5), Const(6))),)))
check('''a[:1:2,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(None), Const(1), Const(2)), Const(5), Const(6))),)))
check('''a[1::2,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(None), Const(2)), Const(5), Const(6))),)))
check('''a[1:2:3,5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Const(2), Const(3)), Const(5), Const(6))),)))
check('''a[1:[2]:3,[],5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Call('list', Const(2)), Const(3)), Call('list'), Const(5), Const(6))),)))
check('''a[1:[[2]]:3,[[]],5,6,]''', Suite((Call('return', Call('[.]', Name('a'), Call('slice', Const(1), Call('list', Call('list', Const(2))), Const(3)), Call('list', Call('list')), Const(5), Const(6))),)))
check('''a[2].three''', Suite((Call('return', Call('.', Call('[.]', Name('a'), Const(2)), 'three')),)))
check('''a.three''', Suite((Call('return', Call('.', Name('a'), 'three')),)))
check('''a[2]''', Suite((Call('return', Call('[.]', Name('a'), Const(2))),)))
check('''a.three[2]''', Suite((Call('return', Call('[.]', Call('.', Name('a'), 'three'), Const(2))),)))
check('''x and y''', Suite((Call('return', Call('and', Name('x'), Name('y'))),)))
check('''x and y and z''', Suite((Call('return', Call('and', Name('x'), Call('and', Name('y'), Name('z')))),)))
check('''x and y and z and w''', Suite((Call('return', Call('and', Name('x'), Call('and', Name('y'), Call('and', Name('z'), Name('w'))))),)))
check('''not x''', Suite((Call('return', Call('not', Name('x'))),)))
check('''not x and y''', Suite((Call('return', Call('and', Call('not', Name('x')), Name('y'))),)))
check('''x or y''', Suite((Call('return', Call('or', Name('x'), Name('y'))),)))
check('''x or y and z''', Suite((Call('return', Call('or', Name('x'), Call('and', Name('y'), Name('z')))),)))
check('''x or y or z''', Suite((Call('return', Call('or', Name('x'), Call('or', Name('y'), Name('z')))),)))
check('''not x or y and z''', Suite((Call('return', Call('or', Call('not', Name('x')), Call('and', Name('y'), Name('z')))),)))
check('''x or not y and z''', Suite((Call('return', Call('or', Name('x'), Call('and', Call('not', Name('y')), Name('z')))),)))
check('''x or y and not z''', Suite((Call('return', Call('or', Name('x'), Call('and', Name('y'), Call('not', Name('z'))))),)))
check('''not x or not y and z''', Suite((Call('return', Call('or', Call('not', Name('x')), Call('and', Call('not', Name('y')), Name('z')))),)))
check('''not x or y and not z''', Suite((Call('return', Call('or', Call('not', Name('x')), Call('and', Name('y'), Call('not', Name('z'))))),)))
check('''x or not y and not z''', Suite((Call('return', Call('or', Name('x'), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check('''not x or not y and not z''', Suite((Call('return', Call('or', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check('''x and y or z''', Suite((Call('return', Call('or', Call('and', Name('x'), Name('y')), Name('z'))),)))
check('''not x and y or z''', Suite((Call('return', Call('or', Call('and', Call('not', Name('x')), Name('y')), Name('z'))),)))
check('''x and not y or z''', Suite((Call('return', Call('or', Call('and', Name('x'), Call('not', Name('y'))), Name('z'))),)))
check('''x and y or not z''', Suite((Call('return', Call('or', Call('and', Name('x'), Name('y')), Call('not', Name('z')))),)))
check('''not x and not y or z''', Suite((Call('return', Call('or', Call('and', Call('not', Name('x')), Call('not', Name('y'))), Name('z'))),)))
check('''not x and y or not z''', Suite((Call('return', Call('or', Call('and', Call('not', Name('x')), Name('y')), Call('not', Name('z')))),)))
check('''x and not y or not z''', Suite((Call('return', Call('or', Call('and', Name('x'), Call('not', Name('y'))), Call('not', Name('z')))),)))
check('''x < y''', Suite((Call('return', Call('<', Name('x'), Name('y'))),)))
check('''x > y''', Suite((Call('return', Call('>', Name('x'), Name('y'))),)))
check('''x == y''', Suite((Call('return', Call('==', Name('x'), Name('y'))),)))
check('''x >= y''', Suite((Call('return', Call('>=', Name('x'), Name('y'))),)))
check('''x <= y''', Suite((Call('return', Call('<=', Name('x'), Name('y'))),)))
check('''x != y''', Suite((Call('return', Call('!=', Name('x'), Name('y'))),)))
check('''x in y''', Suite((Call('return', Call('in', Name('x'), Name('y'))),)))
check('''x not in y''', Suite((Call('return', Call('not-in', Name('x'), Name('y'))),)))
check('''1 < y < 2''', Suite((Call('return', Call('and', Call('<', Const(1), Name('y')), Call('<', Name('y'), Const(2)))),)))
check('''1 < y == 2''', Suite((Call('return', Call('and', Call('<', Const(1), Name('y')), Call('==', Name('y'), Const(2)))),)))
check('''(x, None) < y''', Suite((Call('return', Call('<', Call('tuple', Name('x'), Const(None)), Name('y'))),)))
check('''(x, None) > y''', Suite((Call('return', Call('>', Call('tuple', Name('x'), Const(None)), Name('y'))),)))
check('''(x, None) == y''', Suite((Call('return', Call('==', Call('tuple', Name('x'), Const(None)), Name('y'))),)))
check('''(x, None) >= y''', Suite((Call('return', Call('>=', Call('tuple', Name('x'), Const(None)), Name('y'))),)))
check('''(x, None) <= y''', Suite((Call('return', Call('<=', Call('tuple', Name('x'), Const(None)), Name('y'))),)))
check('''(x, None) != y''', Suite((Call('return', Call('!=', Call('tuple', Name('x'), Const(None)), Name('y'))),)))
check('''(x, None) in y''', Suite((Call('return', Call('in', Call('tuple', Name('x'), Const(None)), Name('y'))),)))
check('''(x, None) not in y''', Suite((Call('return', Call('not-in', Call('tuple', Name('x'), Const(None)), Name('y'))),)))
check('''(1, None) < y < 2''', Suite((Call('return', Call('and', Call('<', Call('tuple', Const(1), Const(None)), Name('y')), Call('<', Name('y'), Const(2)))),)))
check('''(1, None) < y == 2''', Suite((Call('return', Call('and', Call('<', Call('tuple', Const(1), Const(None)), Name('y')), Call('==', Name('y'), Const(2)))),)))
check('''x < (y, None)''', Suite((Call('return', Call('<', Name('x'), Call('tuple', Name('y'), Const(None)))),)))
check('''x > (y, None)''', Suite((Call('return', Call('>', Name('x'), Call('tuple', Name('y'), Const(None)))),)))
check('''x == (y, None)''', Suite((Call('return', Call('==', Name('x'), Call('tuple', Name('y'), Const(None)))),)))
check('''x >= (y, None)''', Suite((Call('return', Call('>=', Name('x'), Call('tuple', Name('y'), Const(None)))),)))
check('''x <= (y, None)''', Suite((Call('return', Call('<=', Name('x'), Call('tuple', Name('y'), Const(None)))),)))
check('''x != (y, None)''', Suite((Call('return', Call('!=', Name('x'), Call('tuple', Name('y'), Const(None)))),)))
check('''x in (y, None)''', Suite((Call('return', Call('in', Name('x'), Call('tuple', Name('y'), Const(None)))),)))
check('''x not in (y, None)''', Suite((Call('return', Call('not-in', Name('x'), Call('tuple', Name('y'), Const(None)))),)))
check('''1 < (y, None) < 2''', Suite((Call('return', Call('and', Call('<', Const(1), Call('tuple', Name('y'), Const(None))), Call('<', Call('tuple', Name('y'), Const(None)), Const(2)))),)))
check('''1 < (y, None) == 2''', Suite((Call('return', Call('and', Call('<', Const(1), Call('tuple', Name('y'), Const(None))), Call('==', Call('tuple', Name('y'), Const(None)), Const(2)))),)))
check('''1 < y < (2, None)''', Suite((Call('return', Call('and', Call('<', Const(1), Name('y')), Call('<', Name('y'), Call('tuple', Const(2), Const(None))))),)))
check('''1 < y == (2, None)''', Suite((Call('return', Call('and', Call('<', Const(1), Name('y')), Call('==', Name('y'), Call('tuple', Const(2), Const(None))))),)))
check('''(x, None) < (y, None)''', Suite((Call('return', Call('<', Call('tuple', Name('x'), Const(None)), Call('tuple', Name('y'), Const(None)))),)))
check('''(x, None) > (y, None)''', Suite((Call('return', Call('>', Call('tuple', Name('x'), Const(None)), Call('tuple', Name('y'), Const(None)))),)))
check('''(x, None) == (y, None)''', Suite((Call('return', Call('==', Call('tuple', Name('x'), Const(None)), Call('tuple', Name('y'), Const(None)))),)))
check('''(x, None) >= (y, None)''', Suite((Call('return', Call('>=', Call('tuple', Name('x'), Const(None)), Call('tuple', Name('y'), Const(None)))),)))
check('''(x, None) <= (y, None)''', Suite((Call('return', Call('<=', Call('tuple', Name('x'), Const(None)), Call('tuple', Name('y'), Const(None)))),)))
check('''(x, None) != (y, None)''', Suite((Call('return', Call('!=', Call('tuple', Name('x'), Const(None)), Call('tuple', Name('y'), Const(None)))),)))
check('''(x, None) in (y, None)''', Suite((Call('return', Call('in', Call('tuple', Name('x'), Const(None)), Call('tuple', Name('y'), Const(None)))),)))
check('''(x, None) not in (y, None)''', Suite((Call('return', Call('not-in', Call('tuple', Name('x'), Const(None)), Call('tuple', Name('y'), Const(None)))),)))
check('''(1, None) < (y, None) < 2''', Suite((Call('return', Call('and', Call('<', Call('tuple', Const(1), Const(None)), Call('tuple', Name('y'), Const(None))), Call('<', Call('tuple', Name('y'), Const(None)), Const(2)))),)))
check('''(1, None) < (y, None) == 2''', Suite((Call('return', Call('and', Call('<', Call('tuple', Const(1), Const(None)), Call('tuple', Name('y'), Const(None))), Call('==', Call('tuple', Name('y'), Const(None)), Const(2)))),)))
check('''(1, None) < y < (2, None)''', Suite((Call('return', Call('and', Call('<', Call('tuple', Const(1), Const(None)), Name('y')), Call('<', Name('y'), Call('tuple', Const(2), Const(None))))),)))
check('''(1, None) < y == (2, None)''', Suite((Call('return', Call('and', Call('<', Call('tuple', Const(1), Const(None)), Name('y')), Call('==', Name('y'), Call('tuple', Const(2), Const(None))))),)))
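# chained comparisons such as 1 < y < 2 lower to an 'and' of pairwise
# comparisons, and `not in` surfaces as the single operator 'not-in'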
check('''x + y''', Suite((Call('return', Call('+', Name('x'), Name('y'))),)))
check('''x + y + z''', Suite((Call('return', Call('+', Call('+', Name('x'), Name('y')), Name('z'))),)))
check('''x + y + z + w''', Suite((Call('return', Call('+', Call('+', Call('+', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x - y''', Suite((Call('return', Call('-', Name('x'), Name('y'))),)))
check('''x - y - z''', Suite((Call('return', Call('-', Call('-', Name('x'), Name('y')), Name('z'))),)))
check('''x - y - z - w''', Suite((Call('return', Call('-', Call('-', Call('-', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x - y + z - w''', Suite((Call('return', Call('-', Call('+', Call('-', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x * y''', Suite((Call('return', Call('*', Name('x'), Name('y'))),)))
check('''x * y * z''', Suite((Call('return', Call('*', Call('*', Name('x'), Name('y')), Name('z'))),)))
check('''x * y * z * w''', Suite((Call('return', Call('*', Call('*', Call('*', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x * y - z * w''', Suite((Call('return', Call('-', Call('*', Name('x'), Name('y')), Call('*', Name('z'), Name('w')))),)))
check('''x / y''', Suite((Call('return', Call('/', Name('x'), Name('y'))),)))
check('''x / y / z''', Suite((Call('return', Call('/', Call('/', Name('x'), Name('y')), Name('z'))),)))
check('''x / y / z / w''', Suite((Call('return', Call('/', Call('/', Call('/', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x / y * z / w''', Suite((Call('return', Call('/', Call('*', Call('/', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x % y''', Suite((Call('return', Call('%', Name('x'), Name('y'))),)))
check('''x % y % z''', Suite((Call('return', Call('%', Call('%', Name('x'), Name('y')), Name('z'))),)))
check('''x % y % z % w''', Suite((Call('return', Call('%', Call('%', Call('%', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x % y / z % w''', Suite((Call('return', Call('%', Call('/', Call('%', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x // y''', Suite((Call('return', Call('//', Name('x'), Name('y'))),)))
check('''x // y // z''', Suite((Call('return', Call('//', Call('//', Name('x'), Name('y')), Name('z'))),)))
check('''x // y // z // w''', Suite((Call('return', Call('//', Call('//', Call('//', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''x // y % z // w''', Suite((Call('return', Call('//', Call('%', Call('//', Name('x'), Name('y')), Name('z')), Name('w'))),)))
check('''+x''', Suite((Call('return', Call('u+', Name('x'))),)))
check('''-x''', Suite((Call('return', Call('u-', Name('x'))),)))
check('''++x''', Suite((Call('return', Call('u+', Call('u+', Name('x')))),)))
check('''+-x''', Suite((Call('return', Call('u+', Call('u-', Name('x')))),)))
check('''-+x''', Suite((Call('return', Call('u-', Call('u+', Name('x')))),)))
check('''--x''', Suite((Call('return', Call('u-', Call('u-', Name('x')))),)))
check('''+x + y''', Suite((Call('return', Call('+', Call('u+', Name('x')), Name('y'))),)))
check('''-x + y''', Suite((Call('return', Call('+', Call('u-', Name('x')), Name('y'))),)))
check('''++x + y''', Suite((Call('return', Call('+', Call('u+', Call('u+', Name('x'))), Name('y'))),)))
check('''+-x + y''', Suite((Call('return', Call('+', Call('u+', Call('u-', Name('x'))), Name('y'))),)))
check('''-+x + y''', Suite((Call('return', Call('+', Call('u-', Call('u+', Name('x'))), Name('y'))),)))
check('''--x + y''', Suite((Call('return', Call('+', Call('u-', Call('u-', Name('x'))), Name('y'))),)))
check('''x + +x''', Suite((Call('return', Call('+', Name('x'), Call('u+', Name('x')))),)))
check('''x + -x''', Suite((Call('return', Call('+', Name('x'), Call('u-', Name('x')))),)))
check('''x + ++x''', Suite((Call('return', Call('+', Name('x'), Call('u+', Call('u+', Name('x'))))),)))
check('''x + +-x''', Suite((Call('return', Call('+', Name('x'), Call('u+', Call('u-', Name('x'))))),)))
check('''x + -+x''', Suite((Call('return', Call('+', Name('x'), Call('u-', Call('u+', Name('x'))))),)))
check('''x + --x''', Suite((Call('return', Call('+', Name('x'), Call('u-', Call('u-', Name('x'))))),)))
check('''x ** y''', Suite((Call('return', Call('**', Name('x'), Name('y'))),)))
check('''x ** y ** z''', Suite((Call('return', Call('**', Name('x'), Call('**', Name('y'), Name('z')))),)))
check('''x ** y ** z ** w''', Suite((Call('return', Call('**', Name('x'), Call('**', Name('y'), Call('**', Name('z'), Name('w'))))),)))
check('''x ** y // z ** w''', Suite((Call('return', Call('//', Call('**', Name('x'), Name('y')), Call('**', Name('z'), Name('w')))),)))
check('''x.y**2''', Suite((Call('return', Call('**', Call('.', Name('x'), 'y'), Const(2))),)))
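# the binary operators +, -, *, /, %, and // associate to the left (nesting
# accumulates in the left operand), while ** associates to the right; unary
# plus and minus appear as 'u+' and 'u-'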
check('f(None)', Suite((Call('return', Call(Name('f'), Const(None))),)))
check('f(x, None)', Suite((Call('return', Call(Name('f'), Name('x'), Const(None))),)))
check('f(x, y, None)', Suite((Call('return', Call(Name('f'), Name('x'), Name('y'), Const(None))),)))
check('f(x, y, z, None)', Suite((Call('return', Call(Name('f'), Name('x'), Name('y'), Name('z'), Const(None))),)))
check('f(x=1)', Suite((Call('return', CallKeyword(Name('f'), (), (('x', Const(1)),))),)))
check('f(x, y=1)', Suite((Call('return', CallKeyword(Name('f'), (Name('x'),), (('y', Const(1)),))),)))
check('f(x, y, z=1)', Suite((Call('return', CallKeyword(Name('f'), (Name('x'), Name('y'),), (('z', Const(1)),))),)))
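# calls with keyword arguments become CallKeyword(func, (positional, ...),
# ((name, value), ...)) rather than a plain Call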
check('x = 1; x', Suite((Assign((Name('x'),), Const(1)), Name('x'), Call('return', Const(None)),)))
check('x = 1; x;', Suite((Assign((Name('x'),), Const(1)), Name('x'), Call('return', Const(None)),)))
check('x, = 1; x', Suite((Assign((Unpack((Name('x'),)),), Const(1)), Name('x'), Call('return', Const(None)),)))
check('x, y = 1; x', Suite((Assign((Unpack((Name('x'), Name('y'))),), Const(1)), Name('x'), Call('return', Const(None)),)))
check('x, y, = 1; x', Suite((Assign((Unpack((Name('x'), Name('y'))),), Const(1)), Name('x'), Call('return', Const(None)),)))
check('x, y, z = 1; x', Suite((Assign((Unpack((Name('x'), Name('y'), Name('z'))),), Const(1)), Name('x'), Call('return', Const(None)),)))
check('x, y, z, = 1; x', Suite((Assign((Unpack((Name('x'), Name('y'), Name('z'))),), Const(1)), Name('x'), Call('return', Const(None)),)))
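# unpacking assignment targets are wrapped in Unpack(...); the trailing bare
# `x` stays in the Suite as a statement-level expression ahead of the
# implicit return of None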
check("False", Suite((Call('return', Const(False)),)))
check("True", Suite((Call('return', Const(True)),)))
check("not x", Suite((Call('return', Call('not', Name('x'))),)))
check("not x and not y", Suite((Call('return', Call('and', Call('not', Name('x')), Call('not', Name('y')))),)))
check("not x and not y and not z", Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x and not y and not z", Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x and not y and not z", Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x or not y", Suite((Call('return', Call('or', Call('not', Name('x')), Call('not', Name('y')))),)))
check("not x or not y or not z", Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x or not y or not z", Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x or not y or not z", Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("(not x or not y, None) and not z", Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Name('x')), Call('not', Name('y'))), Const(None)), Call('not', Name('z')))),)))
check("not x and (not y or not z, None)", Suite((Call('return', Call('and', Call('not', Name('x')), Call('tuple', Call('or', Call('not', Name('y')), Call('not', Name('z'))), Const(None)))),)))
check("not x(1, None)", Suite((Call('return', Call('not', Call(Name('x'), Const(1), Const(None)))),)))
check("not x(1, None) and not y(2, None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None))))),)))
check("not x(1, None) and not y(2, None) and not z(3, None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) and not y(2, None) and not z(3, None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) and not y(2, None) and not z(3, None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) or not y(2, None)", Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None))))),)))
check("not x(1, None) or not y(2, None) or not z(3, None)", Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) or not y(2, None) or not z(3, None)", Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) or not y(2, None) or not z(3, None)", Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("(not x(1, None) or not y(2, None), None) and not z(3, None)", Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None)))), Const(None)), Call('not', Call(Name('z'), Const(3), Const(None))))),)))
check("not x(1, None) and (not y(2, None) or not z(3, None), None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('tuple', Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))), Const(None)))),)))
check("not x.a", Suite((Call('return', Call('not', Call('.', Name('x'), 'a'))),)))
check("not x.a and not y.b", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b')))),)))
check("not x.a and not y.b and not z.c", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a and not y.b and not z.c", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a and not y.b and not z.c", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a or not y.b", Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b')))),)))
check("not x.a or not y.b or not z.c", Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a or not y.b or not z.c", Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a or not y.b or not z.c", Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("(not x.a or not y.b, None) and not z.c", Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b'))), Const(None)), Call('not', Call('.', Name('z'), 'c')))),)))
check("not x.a and (not y.b or not z.c, None)", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('tuple', Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))), Const(None)))),)))
check("False", Suite((Call('return', Const(False)),)))
check("True", Suite((Call('return', Const(True)),)))
check("not x", Suite((Call('return', Call('not', Name('x'))),)))
check("not x and not y", Suite((Call('return', Call('and', Call('not', Name('x')), Call('not', Name('y')))),)))
check("not x and not y and not z", Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x and not y and not z", Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x and not y and not z", Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x or not y", Suite((Call('return', Call('or', Call('not', Name('x')), Call('not', Name('y')))),)))
check("not x or not y or not z", Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x or not y or not z", Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("not x or not y or not z", Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check("(not x or not y, None) and not z", Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Name('x')), Call('not', Name('y'))), Const(None)), Call('not', Name('z')))),)))
check("not x and (not y or not z, None)", Suite((Call('return', Call('and', Call('not', Name('x')), Call('tuple', Call('or', Call('not', Name('y')), Call('not', Name('z'))), Const(None)))),)))
check("not x(1, None)", Suite((Call('return', Call('not', Call(Name('x'), Const(1), Const(None)))),)))
check("not x(1, None) and not y(2, None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None))))),)))
check("not x(1, None) and not y(2, None) and not z(3, None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) and not y(2, None) and not z(3, None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) and not y(2, None) and not z(3, None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) or not y(2, None)", Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None))))),)))
check("not x(1, None) or not y(2, None) or not z(3, None)", Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) or not y(2, None) or not z(3, None)", Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("not x(1, None) or not y(2, None) or not z(3, None)", Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check("(not x(1, None) or not y(2, None), None) and not z(3, None)", Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None)))), Const(None)), Call('not', Call(Name('z'), Const(3), Const(None))))),)))
check("not x(1, None) and (not y(2, None) or not z(3, None), None)", Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('tuple', Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))), Const(None)))),)))
check("not x.a", Suite((Call('return', Call('not', Call('.', Name('x'), 'a'))),)))
check("not x.a and not y.b", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b')))),)))
check("not x.a and not y.b and not z.c", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a and not y.b and not z.c", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a and not y.b and not z.c", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a or not y.b", Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b')))),)))
check("not x.a or not y.b or not z.c", Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a or not y.b or not z.c", Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("not x.a or not y.b or not z.c", Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check("(not x.a or not y.b, None) and not z.c", Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b'))), Const(None)), Call('not', Call('.', Name('z'), 'c')))),)))
check("not x.a and (not y.b or not z.c, None)", Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('tuple', Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))), Const(None)))),)))
check('''False''', Suite((Call('return', Const(False)),)))
check('''True''', Suite((Call('return', Const(True)),)))
check('''not x''', Suite((Call('return', Call('not', Name('x'))),)))
check('''not x and not y''', Suite((Call('return', Call('and', Call('not', Name('x')), Call('not', Name('y')))),)))
check('''not x and not y and not z''', Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check('''not x and not y and not z''', Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check('''not x and not y and not z''', Suite((Call('return', Call('and', Call('not', Name('x')), Call('and', Call('not', Name('y')), Call('not', Name('z'))))),)))
check('''not x or not y''', Suite((Call('return', Call('or', Call('not', Name('x')), Call('not', Name('y')))),)))
check('''not x or not y or not z''', Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check('''not x or not y or not z''', Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check('''not x or not y or not z''', Suite((Call('return', Call('or', Call('not', Name('x')), Call('or', Call('not', Name('y')), Call('not', Name('z'))))),)))
check('''(not x or not y, None) and not z''', Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Name('x')), Call('not', Name('y'))), Const(None)), Call('not', Name('z')))),)))
check('''not x and (not y or not z, None)''', Suite((Call('return', Call('and', Call('not', Name('x')), Call('tuple', Call('or', Call('not', Name('y')), Call('not', Name('z'))), Const(None)))),)))
check('''not x(1, None)''', Suite((Call('return', Call('not', Call(Name('x'), Const(1), Const(None)))),)))
check('''not x(1, None) and not y(2, None)''', Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None))))),)))
check('''not x(1, None) and not y(2, None) and not z(3, None)''', Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check('''not x(1, None) and not y(2, None) and not z(3, None)''', Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check('''not x(1, None) and not y(2, None) and not z(3, None)''', Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('and', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check('''not x(1, None) or not y(2, None)''', Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None))))),)))
check('''not x(1, None) or not y(2, None) or not z(3, None)''', Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check('''not x(1, None) or not y(2, None) or not z(3, None)''', Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check('''not x(1, None) or not y(2, None) or not z(3, None)''', Suite((Call('return', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))))),)))
check('''(not x(1, None) or not y(2, None), None) and not z(3, None)''', Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Call(Name('x'), Const(1), Const(None))), Call('not', Call(Name('y'), Const(2), Const(None)))), Const(None)), Call('not', Call(Name('z'), Const(3), Const(None))))),)))
check('''not x(1, None) and (not y(2, None) or not z(3, None), None)''', Suite((Call('return', Call('and', Call('not', Call(Name('x'), Const(1), Const(None))), Call('tuple', Call('or', Call('not', Call(Name('y'), Const(2), Const(None))), Call('not', Call(Name('z'), Const(3), Const(None)))), Const(None)))),)))
check('''not x.a''', Suite((Call('return', Call('not', Call('.', Name('x'), 'a'))),)))
check('''not x.a and not y.b''', Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b')))),)))
check('''not x.a and not y.b and not z.c''', Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check('''not x.a and not y.b and not z.c''', Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check('''not x.a and not y.b and not z.c''', Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('and', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check('''not x.a or not y.b''', Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b')))),)))
check('''not x.a or not y.b or not z.c''', Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check('''not x.a or not y.b or not z.c''', Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check('''not x.a or not y.b or not z.c''', Suite((Call('return', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))))),)))
check('''(not x.a or not y.b, None) and not z.c''', Suite((Call('return', Call('and', Call('tuple', Call('or', Call('not', Call('.', Name('x'), 'a')), Call('not', Call('.', Name('y'), 'b'))), Const(None)), Call('not', Call('.', Name('z'), 'c')))),)))
check('''not x.a and (not y.b or not z.c, None)''', Suite((Call('return', Call('and', Call('not', Call('.', Name('x'), 'a')), Call('tuple', Call('or', Call('not', Call('.', Name('y'), 'b')), Call('not', Call('.', Name('z'), 'c'))), Const(None)))),)))
check('''x != y''', Suite((Call('return', Call('!=', Name('x'), Name('y'))),)))
check('''x == y''', Suite((Call('return', Call('==', Name('x'), Name('y'))),)))
check('''x <= y''', Suite((Call('return', Call('<=', Name('x'), Name('y'))),)))
check('''x > y''', Suite((Call('return', Call('>', Name('x'), Name('y'))),)))
check('''x >= y''', Suite((Call('return', Call('>=', Name('x'), Name('y'))),)))
check('''x < y''', Suite((Call('return', Call('<', Name('x'), Name('y'))),)))
check('''x not in y''', Suite((Call('return', Call('not-in', Name('x'), Name('y'))),)))
check('''x in y''', Suite((Call('return', Call('in', Name('x'), Name('y'))),)))
check('''x == y and y == z''', Suite((Call('return', Call('and', Call('==', Name('x'), Name('y')), Call('==', Name('y'), Name('z')))),)))
check('''x == y and y == z''', Suite((Call('return', Call('and', Call('==', Name('x'), Name('y')), Call('==', Name('y'), Name('z')))),)))
check('''x == y or y == z''', Suite((Call('return', Call('or', Call('==', Name('x'), Name('y')), Call('==', Name('y'), Name('z')))),)))
check('''x != y or y != z''', Suite((Call('return', Call('or', Call('!=', Name('x'), Name('y')), Call('!=', Name('y'), Name('z')))),)))
check('''x != y or y != z''', Suite((Call('return', Call('or', Call('!=', Name('x'), Name('y')), Call('!=', Name('y'), Name('z')))),)))
check('''x != y or y == z''', Suite((Call('return', Call('or', Call('!=', Name('x'), Name('y')), Call('==', Name('y'), Name('z')))),)))
check('''a and b and c and d and e''', Suite((Call('return', Call('and', Name('a'), Call('and', Name('b'), Call('and', Name('c'), Call('and', Name('d'), Name('e')))))),)))
check('''a and b and c and d and e''', Suite((Call('return', Call('and', Name('a'), Call('and', Name('b'), Call('and', Name('c'), Call('and', Name('d'), Name('e')))))),)))
check("def g(x): return 3.14", Suite((Assign((Name('g'),), Def(('x',), (), Suite((Call('return', Const(3.14)),)))), Call('return', Const(None)),)))
check("""def g(x):
return 3.14""", Suite((Assign((Name('g'),), Def(('x',), (), Suite((Call('return', Const(3.14)),)))), Call('return', Const(None)),)))
check("def g(x, y): return x**2", Suite((Assign((Name('g'),), Def(('x', 'y'), (), Suite((Call('return', Call('**', Name('x'), Const(2))),)))), Call('return', Const(None)),)))
check("""def g(x, y):
return x**2""", Suite((Assign((Name('g'),), Def(('x', 'y'), (), Suite((Call('return', Call('**', Name('x'), Const(2))),)))), Call('return', Const(None)),)))
check("lambda: 3.14", Suite((Call('return', Def((), (), Suite((Call('return', Const(3.14)),)))),)))
check("lambda x: x**2", Suite((Call('return', Def(('x',), (), Suite((Call('return', Call('**', Name('x'), Const(2))),)))),)))
check("(lambda x: x**2, None)", Suite((Call('return', Call('tuple', Def(('x',), (), Suite((Call('return', Call('**', Name('x'), Const(2))),))), Const(None))),)))
check("1 if x == 0 else 2", Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Call('return', Const(1)),)), Suite((Call('return', Const(2)),))),)))
check("y = (1 if x == 0 else 2, None)", Suite((Assign((Name('y'),), Call('tuple', Call('?', Call('==', Name('x'), Const(0)), Const(1), Const(2)), Const(None))), Call('return', Const(None)),)))
check("1 if x == 0 else None", Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Call('return', Const(1)),)), Suite((Call('return', Const(None)),))),)))
check("(1 if x == 0 else 2, None)", Suite((Call('return', Call('tuple', Call('?', Call('==', Name('x'), Const(0)), Const(1), Const(2)), Const(None))),)))
check("(1 if x == 0 else None, None)", Suite((Call('return', Call('tuple', Call('?', Call('==', Name('x'), Const(0)), Const(1), Const(None)), Const(None))),)))
check("""if x == 0:
return 1""", Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Call('return', Const(1)),)), Suite((Call('return', Const(None)),))),)))
check("""if x == 0:
    y = 1
    return 1""", Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Assign((Name('y'),), Const(1)), Call('return', Const(1)),)), Suite((Call('return', Const(None)),))),)))
check('''if x == 0:
    return 1
else:
    return 2''', Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Call('return', Const(1)),)), Suite((Call('return', Const(2)),))),)))
check('''if x == 0:
    y = 1
    return 1
else:
    y = 2
    return 2''', Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Assign((Name('y'),), Const(1)), Call('return', Const(1)),)), Suite((Assign((Name('y'),), Const(2)), Call('return', Const(2))))),)))
check('''if x == 0:
    return 1
elif x == 1:
    return 2
else:
    return 3''', Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Call('return', Const(1)),)), Suite((Call('if', Call('==', Name('x'), Const(1)), Suite((Call('return', Const(2)),)), Suite((Call('return', Const(3)),))),))),)))
check('''if x == 0:
    y = 1
    return 1
elif x == 1:
    y = 2
    return 2
else:
    y = 3
    return 3''', Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Assign((Name('y'),), Const(1)), Call('return', Const(1)),)), Suite((Call('if', Call('==', Name('x'), Const(1)), Suite((Assign((Name('y'),), Const(2)), Call('return', Const(2)),)), Suite((Assign((Name('y'),), Const(3)), Call('return', Const(3))))),))),)))
check('''if x == 0:
    y = 1''', Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Assign((Name('y'),), Const(1)),))), Call('return', Const(None)),)))
check('''if x == 0:
    y = 1
    z = 1''', Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Assign((Name('y'),), Const(1)), Assign((Name('z'),), Const(1)),))), Call('return', Const(None)),)))
check('''if x == 0:
    y = 1
else:
    y = 2''', Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Assign((Name('y'),), Const(1)),)), Suite((Assign((Name('y'),), Const(2)),))), Call('return', Const(None)),)))
check('''if x == 0:
    y = 1
    z = 1
else:
    y = 2
    z = 2''', Suite((Call('if', Call('==', Name('x'), Const(0)), Suite((Assign((Name('y'),), Const(1)), Assign((Name('z'),), Const(1)),)), Suite((Assign((Name('y'),), Const(2)), Assign((Name('z'),), Const(2)),))), Call('return', Const(None)),)))
# check("print(None)", Suite((Call('return', Call(Name('print'), Const(None))),)))
# check("print(1, None)", Suite((Call('return', Call(Name('print'), Const(1), Const(None))),)))
# check("print(1, 2, 3, None)", Suite((Call('return', Call(Name('print'), Const(1), Const(2), Const(3), Const(None))),)))
check("[]", Suite((Call('return', Call('list')),)))
check("[1]", Suite((Call('return', Call('list', Const(1))),)))
check("[1, 2]", Suite((Call('return', Call('list', Const(1), Const(2))),)))
check("[one]", Suite((Call('return', Call('list', Name('one'))),)))
check("[one, two]", Suite((Call('return', Call('list', Name('one'), Name('two'))),)))
check("['one']", Suite((Call('return', Call('list', Const('one'))),)))
check("['one', 'two']", Suite((Call('return', Call('list', Const('one'), Const('two'))),)))
check("set([])", Suite((Call('return', Call(Name('set'), Call('list'))),)))
check("set([1])", Suite((Call('return', Call(Name('set'), Call('list', Const(1)))),)))
check("set([1, 2])", Suite((Call('return', Call(Name('set'), Call('list', Const(1), Const(2)))),)))
check("set([one])", Suite((Call('return', Call(Name('set'), Call('list', Name('one')))),)))
check("set([one, two])", Suite((Call('return', Call(Name('set'), Call('list', Name('one'), Name('two')))),)))
check("set(['one'])", Suite((Call('return', Call(Name('set'), Call('list', Const('one')))),)))
check("set(['one', 'two'])", Suite((Call('return', Call(Name('set'), Call('list', Const('one'), Const('two')))),)))
check("{}", Suite((Call('return', Call('dict')),)))
check("{1}", Suite((Call('return', Call('set', Const(1))),)))
check("{1, 2}", Suite((Call('return', Call('set', Const(1), Const(2))),)))
check("{one}", Suite((Call('return', Call('set', Name('one'))),)))
check("{one, two}", Suite((Call('return', Call('set', Name('one'), Name('two'))),)))
check("{'one'}", Suite((Call('return', Call('set', Const('one'))),)))
check("{'one', 'two'}", Suite((Call('return', Call('set', Const('one'), Const('two'))),)))
check("{'x': 1}", Suite((Call('return', Call('dict', Const('x'), Const(1))),)))
check("{'x': 1, 'y': 2}", Suite((Call('return', Call('dict', Const('x'), Const(1), Const('y'), Const(2))),)))
check("{'x': 1, 'y': 2, 'z': 3}", Suite((Call('return', Call('dict', Const('x'), Const(1), Const('y'), Const(2), Const('z'), Const(3))),)))
check("{'x': one}", Suite((Call('return', Call('dict', Const('x'), Name('one'))),)))
check("{'x': one, 'y': two}", Suite((Call('return', Call('dict', Const('x'), Name('one'), Const('y'), Name('two'))),)))
check("{'x': one, 'y': two, 'z': three}", Suite((Call('return', Call('dict', Const('x'), Name('one'), Const('y'), Name('two'), Const('z'), Name('three'))),)))
check("{1: 1}", Suite((Call('return', Call('dict', Const(1), Const(1))),)))
check("{1: 1, 2: 2}", Suite((Call('return', Call('dict', Const(1), Const(1), Const(2), Const(2))),)))
check("{1: 1, 2: 2, 3: 3}", Suite((Call('return', Call('dict', Const(1), Const(1), Const(2), Const(2), Const(3), Const(3))),)))
check("{1: one}", Suite((Call('return', Call('dict', Const(1), Name('one'))),)))
check("{1: one, 2: two}", Suite((Call('return', Call('dict', Const(1), Name('one'), Const(2), Name('two'))),)))
check("{1: one, 2: two, 3: three}", Suite((Call('return', Call('dict', Const(1), Name('one'), Const(2), Name('two'), Const(3), Name('three'))),)))
check("{one: 1}", Suite((Call('return', Call('dict', Name('one'), Const(1))),)))
check("{one: 1, two: 2}", Suite((Call('return', Call('dict', Name('one'), Const(1), Name('two'), Const(2))),)))
check("{one: 1, two: 2, three: 3}", Suite((Call('return', Call('dict', Name('one'), Const(1), Name('two'), Const(2), Name('three'), Const(3))),)))
check("{one: one}", Suite((Call('return', Call('dict', Name('one'), Name('one'))),)))
check("{one: one, two: two}", Suite((Call('return', Call('dict', Name('one'), Name('one'), Name('two'), Name('two'))),)))
check("{one: one, two: two, three: three}", Suite((Call('return', Call('dict', Name('one'), Name('one'), Name('two'), Name('two'), Name('three'), Name('three'))),)))
check("[x**2 for x in something]", Suite((Call('return', Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call('**', Name('x'), Const(2))),))))),)))
check("[x**2 for x in something if x > 0]", Suite((Call('return', Call(Call('.', Call(Call('.', Name('something'), 'filter'), Def(('x',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('x',), (), Suite((Call('return', Call('**', Name('x'), Const(2))),))))),)))
check("[y**2 for x in something for y in x]", Suite((Call('return', Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Name('x'), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),))))),)))
check("[y**2 for x in something for y in x if x > 0]", Suite((Call('return', Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),))))),)))
check("[y**2 for x in something for y in x if y > 0]", Suite((Call('return', Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('y'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),))))),)))
check("[y**2 for x in something if x for y in x if x > 0]", Suite((Call('return', Call(Call('.', Call(Call('.', Name('something'), 'filter'), Def(('x',), (), Suite((Call('return', Name('x')),)))), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),))))),)))
check("f([x**2 for x in something], None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call('**', Name('x'), Const(2))),)))), Const(None))),)))
check("f([x**2 for x in something if x > 0], None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Call(Call('.', Name('something'), 'filter'), Def(('x',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('x',), (), Suite((Call('return', Call('**', Name('x'), Const(2))),)))), Const(None))),)))
check("f([y**2 for x in something for y in x], None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Name('x'), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),)))), Const(None))),)))
check("f([y**2 for x in something for y in x if x > 0], None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),)))), Const(None))),)))
check("f([y**2 for x in something for y in x if y > 0], None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('y'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),)))), Const(None))),)))
check("f([y**2 for x in something if x for y in x if x > 0], None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Call(Call('.', Name('something'), 'filter'), Def(('x',), (), Suite((Call('return', Name('x')),)))), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),)))), Const(None))),)))
check("f((x**2 for x in something), None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call('**', Name('x'), Const(2))),)))), Const(None))),)))
check("f((x**2 for x in something if x > 0), None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Call(Call('.', Name('something'), 'filter'), Def(('x',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('x',), (), Suite((Call('return', Call('**', Name('x'), Const(2))),)))), Const(None))),)))
check("f((y**2 for x in something for y in x), None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Name('x'), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),)))), Const(None))),)))
check("f((y**2 for x in something for y in x if x > 0), None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),)))), Const(None))),)))
check("f((y**2 for x in something for y in x if y > 0), None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Name('something'), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('y'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),)))), Const(None))),)))
check("f((y**2 for x in something if x for y in x if x > 0), None)", Suite((Call('return', Call(Name('f'), Call(Call('.', Call(Call('.', Name('something'), 'filter'), Def(('x',), (), Suite((Call('return', Name('x')),)))), 'map'), Def(('x',), (), Suite((Call('return', Call(Call('.', Call(Call('.', Name('x'), 'filter'), Def(('y',), (), Suite((Call('return', Call('>', Name('x'), Const(0))),)))), 'map'), Def(('y',), (), Suite((Call('return', Call('**', Name('y'), Const(2))),))))),)))), Const(None))),)))
check("f(one=1)", Suite((Call('return', CallKeyword(Name('f'), (), (('one', Const(1)),))),)))
check("f(one=1, two=2)", Suite((Call('return', CallKeyword(Name('f'), (), (('one', Const(1)), ('two', Const(2))))),)))
check("f(x, one=1)", Suite((Call('return', CallKeyword(Name('f'), (Name('x'),), (('one', Const(1)),))),)))
check("f(x, one=1, two=2)", Suite((Call('return', CallKeyword(Name('f'), (Name('x'),), (('one', Const(1)), ('two', Const(2))))),)))
check("x[..., :]", Suite((Call('return', Call('[.]', Name('x'), Const(Ellipsis), Call('slice', Const(None), Const(None), Const(None)))),)))
check('x = y = 1', Suite((Assign((Name('x'), Name('y')), Const(1)), Call('return', Const(None)),)))
check('x = y = z = 1', Suite((Assign((Name('x'), Name('y'), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x, y = 1', Suite((Assign((Unpack((Name('x'), Name('y'))),), Const(1)), Call('return', Const(None)),)))
check('x, y = z = 1', Suite((Assign((Unpack((Name('x'), Name('y'))), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x = y, z = 1', Suite((Assign((Name('x'), Unpack((Name('y'), Name('z')))), Const(1)), Call('return', Const(None)),)))
check('x.a = y = 1', Suite((Assign((Call('.', Name('x'), 'a'), Name('y'),), Const(1)), Call('return', Const(None)),)))
check('x.a = y = z = 1', Suite((Assign((Call('.', Name('x'), 'a'), Name('y'), Name('z'),), Const(1)), Call('return', Const(None)),)))
check('x.a, y = 1', Suite((Assign((Unpack((Call('.', Name('x'), 'a'), Name('y'))),), Const(1)), Call('return', Const(None)),)))
check('x.a, y = z = 1', Suite((Assign((Unpack((Call('.', Name('x'), 'a'), Name('y'))), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x.a = y, z = 1', Suite((Assign((Call('.', Name('x'), 'a'), Unpack((Name('y'), Name('z')))), Const(1)), Call('return', Const(None)),)))
check('x = y.a = 1', Suite((Assign((Name('x'), Call('.', Name('y'), 'a'),), Const(1)), Call('return', Const(None)),)))
check('x = y.a = z = 1', Suite((Assign((Name('x'), Call('.', Name('y'), 'a'), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x, y.a = 1', Suite((Assign((Unpack((Name('x'), Call('.', Name('y'), 'a'))),), Const(1)), Call('return', Const(None)),)))
check('x, y.a = z = 1', Suite((Assign((Unpack((Name('x'), Call('.', Name('y'), 'a'))), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x = y.a, z = 1', Suite((Assign((Name('x'), Unpack((Call('.', Name('y'), 'a'), Name('z')))), Const(1)), Call('return', Const(None)),)))
check('x = y = z.a = 1', Suite((Assign((Name('x'), Name('y'), Call('.', Name('z'), 'a'),), Const(1)), Call('return', Const(None)),)))
check('x, y = z.a = 1', Suite((Assign((Unpack((Name('x'), Name('y'))), Call('.', Name('z'), 'a'),), Const(1)), Call('return', Const(None)),)))
check('x = y, z.a = 1', Suite((Assign((Name('x'), Unpack((Name('y'), Call('.', Name('z'), 'a'))),), Const(1)), Call('return', Const(None)),)))
check('x[0] = y = 1', Suite((Assign((Call('[.]', Name('x'), Const(0)), Name('y'),), Const(1)), Call('return', Const(None)),)))
check('x[0] = y = z = 1', Suite((Assign((Call('[.]', Name('x'), Const(0)), Name('y'), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x[0], y = 1', Suite((Assign((Unpack((Call('[.]', Name('x'), Const(0)), Name('y'),)),), Const(1)), Call('return', Const(None)),)))
check('x[0], y = z = 1', Suite((Assign((Unpack((Call('[.]', Name('x'), Const(0)), Name('y'))), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x[0] = y, z = 1', Suite((Assign((Call('[.]', Name('x'), Const(0)), Unpack((Name('y'), Name('z')))), Const(1)), Call('return', Const(None)),)))
check('x = y[0] = 1', Suite((Assign((Name('x'), Call('[.]', Name('y'), Const(0)),), Const(1)), Call('return', Const(None)),)))
check('x = y[0] = z = 1', Suite((Assign((Name('x'), Call('[.]', Name('y'), Const(0)), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x, y[0] = 1', Suite((Assign((Unpack((Name('x'), Call('[.]', Name('y'), Const(0)))),), Const(1)), Call('return', Const(None)),)))
check('x, y[0] = z = 1', Suite((Assign((Unpack((Name('x'), Call('[.]', Name('y'), Const(0)))), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x = y[0], z = 1', Suite((Assign((Name('x'), Unpack((Call('[.]', Name('y'), Const(0)), Name('z')))), Const(1)), Call('return', Const(None)),)))
check('x = y = z[0] = 1', Suite((Assign((Name('x'), Name('y'), Call('[.]', Name('z'), Const(0)),), Const(1)), Call('return', Const(None)),)))
check('x, y = z[0] = 1', Suite((Assign((Unpack((Name('x'), Name('y'))), Call('[.]', Name('z'), Const(0)),), Const(1)), Call('return', Const(None)),)))
check('x = y, z[0] = 1', Suite((Assign((Name('x'), Unpack((Name('y'), Call('[.]', Name('z'), Const(0)))),), Const(1)), Call('return', Const(None)),)))
check('x[:, ...] = y = 1', Suite((Assign((Call('[.]', Name('x'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)), Name('y'),), Const(1)), Call('return', Const(None)),)))
check('x[:, ...] = y = z = 1', Suite((Assign((Call('[.]', Name('x'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)), Name('y'), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x[:, ...], y = 1', Suite((Assign((Unpack((Call('[.]', Name('x'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)), Name('y'))),), Const(1)), Call('return', Const(None)),)))
check('x[:, ...], y = z = 1', Suite((Assign((Unpack((Call('[.]', Name('x'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)), Name('y'))), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x[:, ...] = y, z = 1', Suite((Assign((Call('[.]', Name('x'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)), Unpack((Name('y'), Name('z')))), Const(1)), Call('return', Const(None)),)))
check('x = y[:, ...] = 1', Suite((Assign((Name('x'), Call('[.]', Name('y'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)),), Const(1)), Call('return', Const(None)),)))
check('x = y[:, ...] = z = 1', Suite((Assign((Name('x'), Call('[.]', Name('y'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x, y[:, ...] = 1', Suite((Assign((Unpack((Name('x'), Call('[.]', Name('y'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)))),), Const(1)), Call('return', Const(None)),)))
check('x, y[:, ...] = z = 1', Suite((Assign((Unpack((Name('x'), Call('[.]', Name('y'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)))), Name('z')), Const(1)), Call('return', Const(None)),)))
check('x = y[:, ...], z = 1', Suite((Assign((Name('x'), Unpack((Call('[.]', Name('y'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)), Name('z')))), Const(1)), Call('return', Const(None)),)))
check('x = y = z[:, ...] = 1', Suite((Assign((Name('x'), Name('y'), Call('[.]', Name('z'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)),), Const(1)), Call('return', Const(None)),)))
check('x, y = z[:, ...] = 1', Suite((Assign((Unpack((Name('x'), Name('y'))), Call('[.]', Name('z'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)),), Const(1)), Call('return', Const(None)),)))
check('x = y, z[:, ...] = 1', Suite((Assign((Name('x'), Unpack((Name('y'), Call('[.]', Name('z'), Call('slice', Const(None), Const(None), Const(None)), Const(Ellipsis)))),), Const(1)), Call('return', Const(None)),)))
| 125.30479
| 504
| 0.524295
| 13,535
| 86,335
| 3.343332
| 0.006871
| 0.141651
| 0.182975
| 0.208676
| 0.982719
| 0.97346
| 0.969261
| 0.956024
| 0.943052
| 0.92449
| 0
| 0.020523
| 0.091365
| 86,335
| 688
| 505
| 125.486919
| 0.556325
| 0.00388
| 0
| 0.259084
| 0
| 0.041074
| 0.234118
| 0.000256
| 0
| 0
| 0.000116
| 0
| 0.00158
| 1
| 0.00158
| false
| 0
| 0.00316
| 0
| 0.026856
| 0.00316
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
955f0864c9d6a6980d3ca7a32ecb3051ddf99a42
| 80
|
py
|
Python
|
src/api/chat.py
|
ah-choo/api.ahchoo.fun
|
72fb76c8bfb0eb3b8585ba3e9726e14ca83861d3
|
[
"MIT"
] | 1
|
2021-07-14T03:22:42.000Z
|
2021-07-14T03:22:42.000Z
|
src/api/chat.py
|
ah-choo/api.ahchoo.fun
|
72fb76c8bfb0eb3b8585ba3e9726e14ca83861d3
|
[
"MIT"
] | null | null | null |
src/api/chat.py
|
ah-choo/api.ahchoo.fun
|
72fb76c8bfb0eb3b8585ba3e9726e14ca83861d3
|
[
"MIT"
] | null | null | null |
from src.api import app
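# GET /chats currently returns an empty JSON object as a placeholder
# (app.api appears to be a FastAPI-style router).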
@app.api.get("/chats")
def get_chats():
    return {}
| 11.428571
| 23
| 0.6375
| 13
| 80
| 3.846154
| 0.692308
| 0.32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 80
| 6
| 24
| 13.333333
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
9583c0d49c890ea18a9ab9a6df7adfbd616f2ec2
| 643
|
py
|
Python
|
temboo/core/Library/Facebook/Actions/Video/Rates/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Facebook/Actions/Video/Rates/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Facebook/Actions/Video/Rates/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Facebook.Actions.Video.Rates.CreateRating import CreateRating, CreateRatingInputSet, CreateRatingResultSet, CreateRatingChoreographyExecution
from temboo.Library.Facebook.Actions.Video.Rates.DeleteRating import DeleteRating, DeleteRatingInputSet, DeleteRatingResultSet, DeleteRatingChoreographyExecution
from temboo.Library.Facebook.Actions.Video.Rates.ReadRatings import ReadRatings, ReadRatingsInputSet, ReadRatingsResultSet, ReadRatingsChoreographyExecution
from temboo.Library.Facebook.Actions.Video.Rates.UpdateRating import UpdateRating, UpdateRatingInputSet, UpdateRatingResultSet, UpdateRatingChoreographyExecution
| 128.6
| 161
| 0.900467
| 52
| 643
| 11.134615
| 0.461538
| 0.069085
| 0.117444
| 0.172712
| 0.290155
| 0.290155
| 0.290155
| 0
| 0
| 0
| 0
| 0
| 0.043546
| 643
| 4
| 162
| 160.75
| 0.941463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
252bdd24e668cd8f9571f000edd936897436c3d3
| 2,812
|
py
|
Python
|
src/autoencoding_rl/ReplayBuffer_updatable.py
|
c-rizz/autoencoding_rl
|
65775630e87184c8809a31a8ef980853d5b49f9f
|
[
"MIT"
] | null | null | null |
src/autoencoding_rl/ReplayBuffer_updatable.py
|
c-rizz/autoencoding_rl
|
65775630e87184c8809a31a8ef980853d5b49f9f
|
[
"MIT"
] | null | null | null |
src/autoencoding_rl/ReplayBuffer_updatable.py
|
c-rizz/autoencoding_rl
|
65775630e87184c8809a31a8ef980853d5b49f9f
|
[
"MIT"
] | null | null | null |
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.buffers import DictReplayBuffer
import numpy as np


class ReplayBuffer_updatable(ReplayBuffer):
    def update(self, buffer: ReplayBuffer):
        if (self.optimize_memory_usage or buffer.optimize_memory_usage):
            raise RuntimeError("Memory optimization is not supported")
        copied = 0
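        # Copy in chunks: each pass fills from the current write position to
        # the end of the ring buffer, wrapping self.pos back to 0 when full.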
        while copied < buffer.size():
            space_to_end = self.buffer_size - self.pos
            to_copy = min(space_to_end, buffer.size()-copied)
            self.actions[self.pos:self.pos + to_copy] = buffer.actions[copied:copied+to_copy]
            self.rewards[self.pos:self.pos + to_copy] = buffer.rewards[copied:copied+to_copy]
            self.dones[self.pos:self.pos + to_copy] = buffer.dones[copied:copied+to_copy]
            self.observations[self.pos:self.pos + to_copy] = buffer.observations[copied:copied+to_copy]
            self.next_observations[self.pos:self.pos + to_copy] = buffer.next_observations[copied:copied+to_copy]
            if self.handle_timeout_termination:
                self.timeouts[self.pos:self.pos + to_copy] = buffer.timeouts[copied:copied+to_copy]
            self.pos += to_copy
            if self.pos == self.buffer_size:
                self.full = True
                self.pos = 0
            copied += to_copy


class DictReplayBuffer_updatable(DictReplayBuffer):
    def update(self, buffer: DictReplayBuffer):
        if (self.optimize_memory_usage or buffer.optimize_memory_usage):
            raise RuntimeError("Memory optimization is not supported")
        copied = 0
        while copied < buffer.size():
            space_to_end = self.buffer_size - self.pos
            to_copy = min(space_to_end, buffer.size()-copied)
            self.actions[self.pos:self.pos + to_copy] = buffer.actions[copied:copied+to_copy]
            self.rewards[self.pos:self.pos + to_copy] = buffer.rewards[copied:copied+to_copy]
            self.dones[self.pos:self.pos + to_copy] = buffer.dones[copied:copied+to_copy]
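            # Dict observation spaces keep one array per key, so copy each
            # key's slice separately.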
            for key in self.observations.keys():
                self.observations[key][self.pos:self.pos + to_copy] = buffer.observations[key][copied:copied+to_copy]
            for key in self.next_observations.keys():
                self.next_observations[key][self.pos:self.pos + to_copy] = buffer.next_observations[key][copied:copied+to_copy]
            if self.handle_timeout_termination:
                self.timeouts[self.pos:self.pos + to_copy] = buffer.timeouts[copied:copied+to_copy]
            self.pos += to_copy
            if self.pos == self.buffer_size:
                self.full = True
                self.pos = 0
            copied += to_copy
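

# A minimal usage sketch (assumed, not part of the original file): merge one
# buffer into another. The constructor signature follows stable-baselines3's
# ReplayBuffer(buffer_size, observation_space, action_space); gym is assumed
# to be available.
if __name__ == "__main__":
    import gym
    obs_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)
    act_space = gym.spaces.Discrete(2)
    main_buf = ReplayBuffer_updatable(1000, obs_space, act_space)
    fresh_buf = ReplayBuffer_updatable(100, obs_space, act_space)
    # ... fill fresh_buf via fresh_buf.add(...) during rollouts ...
    main_buf.update(fresh_buf)  # copies transitions over, wrapping on overflow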
| 48.482759
| 127
| 0.635135
| 353
| 2,812
| 4.88102
| 0.150142
| 0.130006
| 0.083575
| 0.12072
| 0.857806
| 0.842716
| 0.793964
| 0.793964
| 0.72664
| 0.645386
| 0
| 0.002904
| 0.265292
| 2,812
| 58
| 128
| 48.482759
| 0.831075
| 0
| 0
| 0.711111
| 0
| 0
| 0.024884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.066667
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c2549983a5a6ec80d2ea351d84fe48cf641bd7b3
| 10,001
|
py
|
Python
|
sdk/purview/azure-purview-scanning/azure/purview/scanning/rest/system_scan_rulesets/_request_builders_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/purview/azure-purview-scanning/azure/purview/scanning/rest/system_scan_rulesets/_request_builders_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/purview/azure-purview-scanning/azure/purview/scanning/rest/system_scan_rulesets/_request_builders_py3.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING, Union
from azure.core.pipeline.transport._base import _format_url_section
from azure.purview.scanning.core.rest import HttpRequest
from msrest import Serializer
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any

_SERIALIZER = Serializer()


def build_list_all_request(
    **kwargs: Any
) -> HttpRequest:
    """List all system scan rulesets for an account.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.

    :return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
     See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
    :rtype: ~azure.purview.scanning.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response_body == {
                "count": "long (optional)",
                "nextLink": "str (optional)",
                "value": [
                    {
                        "kind": "SystemScanRuleset"
                    }
                ]
            }
    """
    api_version = "2018-12-01-preview"
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", '/systemScanRulesets')

    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_get_request(
    data_source_type: Union[str, "_models.DataSourceType"],
    **kwargs: Any
) -> HttpRequest:
    """Get a system scan ruleset for a data source.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.

    :param data_source_type:
    :type data_source_type: str or ~azure.purview.scanning.models.DataSourceType
    :return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
     See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
    :rtype: ~azure.purview.scanning.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response_body == {
                "kind": "SystemScanRuleset"
            }
    """
    api_version = "2018-12-01-preview"
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", '/systemScanRulesets/datasources/{dataSourceType}')
    path_format_arguments = {
        'dataSourceType': _SERIALIZER.url("data_source_type", data_source_type, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_get_by_version_request(
    version: int,
    *,
    data_source_type: Optional[Union[str, "_models.DataSourceType"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Get a scan ruleset by version.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.

    :param version:
    :type version: int
    :keyword data_source_type:
    :paramtype data_source_type: str or ~azure.purview.scanning.models.DataSourceType
    :return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
     See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
    :rtype: ~azure.purview.scanning.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response_body == {
                "kind": "SystemScanRuleset"
            }
    """
    api_version = "2018-12-01-preview"
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", '/systemScanRulesets/versions/{version}')
    path_format_arguments = {
        'version': _SERIALIZER.url("version", version, 'int'),
    }
    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    if data_source_type is not None:
        query_parameters['dataSourceType'] = _SERIALIZER.query("data_source_type", data_source_type, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_get_latest_request(
    *,
    data_source_type: Optional[Union[str, "_models.DataSourceType"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """Get the latest version of a system scan ruleset.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.

    :keyword data_source_type:
    :paramtype data_source_type: str or ~azure.purview.scanning.models.DataSourceType
    :return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
     See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
    :rtype: ~azure.purview.scanning.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response_body == {
                "kind": "SystemScanRuleset"
            }
    """
    api_version = "2018-12-01-preview"
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", '/systemScanRulesets/versions/latest')

    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    if data_source_type is not None:
        query_parameters['dataSourceType'] = _SERIALIZER.query("data_source_type", data_source_type, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_list_versions_by_data_source_request(
    *,
    data_source_type: Optional[Union[str, "_models.DataSourceType"]] = None,
    **kwargs: Any
) -> HttpRequest:
    """List system scan ruleset versions in Data catalog.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.

    :keyword data_source_type:
    :paramtype data_source_type: str or ~azure.purview.scanning.models.DataSourceType
    :return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
     See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
    :rtype: ~azure.purview.scanning.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # response body for status code(s): 200
            response_body == {
                "count": "long (optional)",
                "nextLink": "str (optional)",
                "value": [
                    {
                        "kind": "SystemScanRuleset"
                    }
                ]
            }
    """
    api_version = "2018-12-01-preview"
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", '/systemScanRulesets/versions')

    # Construct parameters
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    if data_source_type is not None:
        query_parameters['dataSourceType'] = _SERIALIZER.query("data_source_type", data_source_type, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
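

# A hypothetical usage sketch (the client object and data-source value are
# assumptions, not part of this generated file):
#
#     request = build_get_request(data_source_type="AzureStorage")
#     response = client.send_request(request)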
| 35.214789
| 136
| 0.652135
| 1,147
| 10,001
| 5.540541
| 0.136007
| 0.039339
| 0.050669
| 0.041542
| 0.846578
| 0.837608
| 0.837608
| 0.83273
| 0.83273
| 0.83273
| 0
| 0.007183
| 0.220478
| 10,001
| 283
| 137
| 35.339223
| 0.807978
| 0.489251
| 0
| 0.754386
| 0
| 0
| 0.193405
| 0.050095
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04386
| false
| 0
| 0.04386
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c2f8e8f4f4cb3447bbc46a08eb00a126a3b3c64d
| 33
|
py
|
Python
|
utils/__init__.py
|
sreekanthmutthoju/ML_Medical_Domain
|
38b263419ddfb20cb4ec199f813fb2fe6b27a2eb
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
sreekanthmutthoju/ML_Medical_Domain
|
38b263419ddfb20cb4ec199f813fb2fe6b27a2eb
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
sreekanthmutthoju/ML_Medical_Domain
|
38b263419ddfb20cb4ec199f813fb2fe6b27a2eb
|
[
"MIT"
] | null | null | null |
print("util package is imported")
| 33
| 33
| 0.787879
| 5
| 33
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
667d31e9c87f39f0a07f69c2e676af418c3889bb
| 3,978
|
py
|
Python
|
Codefights/arcade/intro/level-12/60.Sudoku/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codefights/arcade/intro/level-12/60.Sudoku/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codefights/arcade/intro/level-12/60.Sudoku/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python3
from solution1 import sudoku as f
qa = [
([[1, 3, 2, 5, 4, 6, 9, 8, 7],
[4, 6, 5, 8, 7, 9, 3, 2, 1],
[7, 9, 8, 2, 1, 3, 6, 5, 4],
[9, 2, 1, 4, 3, 5, 8, 7, 6],
[3, 5, 4, 7, 6, 8, 2, 1, 9],
[6, 8, 7, 1, 9, 2, 5, 4, 3],
[5, 7, 6, 9, 8, 1, 4, 3, 2],
[2, 4, 3, 6, 5, 7, 1, 9, 8],
[8, 1, 9, 3, 2, 4, 7, 6, 5]],
True),
([[1, 3, 2, 5, 4, 6, 9, 2, 7],
[4, 6, 5, 8, 7, 9, 3, 8, 1],
[7, 9, 8, 2, 1, 3, 6, 5, 4],
[9, 2, 1, 4, 3, 5, 8, 7, 6],
[3, 5, 4, 7, 6, 8, 2, 1, 9],
[6, 8, 7, 1, 9, 2, 5, 4, 3],
[5, 7, 6, 9, 8, 1, 4, 3, 2],
[2, 4, 3, 6, 5, 7, 1, 9, 8],
[8, 1, 9, 3, 2, 4, 7, 6, 5]],
False),
([[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[1, 2, 3, 4, 5, 6, 7, 8, 9]],
False),
([[1, 3, 4, 2, 5, 6, 9, 8, 7],
[4, 6, 8, 5, 7, 9, 3, 2, 1],
[7, 9, 2, 8, 1, 3, 6, 5, 4],
[9, 2, 3, 1, 4, 5, 8, 7, 6],
[3, 5, 7, 4, 6, 8, 2, 1, 9],
[6, 8, 1, 7, 9, 2, 5, 4, 3],
[5, 7, 6, 9, 8, 1, 4, 3, 2],
[2, 4, 5, 6, 3, 7, 1, 9, 8],
[8, 1, 9, 3, 2, 4, 7, 6, 5]],
False),
([[1, 3, 2, 5, 4, 6, 9, 8, 7],
[4, 6, 5, 8, 7, 9, 3, 2, 1],
[7, 9, 8, 2, 1, 3, 6, 5, 4],
[9, 2, 1, 4, 3, 5, 8, 7, 6],
[3, 5, 4, 7, 6, 8, 2, 1, 9],
[6, 8, 7, 1, 9, 2, 5, 4, 3],
[5, 4, 6, 9, 8, 1, 4, 3, 2],
[2, 7, 3, 6, 5, 7, 1, 9, 8],
[8, 1, 9, 3, 2, 4, 7, 6, 5]],
False),
([[1, 2, 3, 4, 5, 6, 7, 8, 9],
[4, 6, 5, 8, 7, 9, 3, 2, 1],
[7, 9, 8, 2, 1, 3, 6, 5, 4],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[4, 6, 5, 8, 7, 9, 3, 2, 1],
[7, 9, 8, 2, 1, 3, 6, 5, 4],
[1, 2, 3, 4, 5, 6, 7, 8, 9],
[4, 6, 5, 8, 7, 9, 3, 2, 1],
[7, 9, 8, 2, 1, 3, 6, 5, 4]],
False),
([[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 5, 3, 4, 8],
[1, 9, 8, 3, 4, 2, 5, 6, 7],
[8, 5, 9, 9, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 7, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9]],
False),
([[5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5]],
False),
([[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 3, 9, 5, 3, 4, 8],
[1, 9, 8, 1, 4, 2, 5, 6, 7],
[8, 5, 9, 7, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9]],
False),
([[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 5, 3, 4, 8],
[1, 9, 8, 3, 4, 2, 5, 6, 7],
[8, 5, 9, 7, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 5, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9]],
False),
([[1, 2, 3, 4, 5, 6, 7, 8, 9],
[4, 5, 6, 7, 8, 9, 1, 2, 3],
[7, 8, 9, 1, 2, 3, 4, 5, 6],
[2, 3, 4, 5, 6, 7, 8, 9, 1],
[3, 4, 5, 6, 7, 8, 9, 1, 2],
[5, 6, 7, 8, 9, 1, 2, 3, 4],
[6, 7, 8, 9, 1, 2, 3, 4, 5],
[8, 9, 1, 2, 3, 4, 5, 6, 7],
[9, 1, 2, 3, 4, 5, 6, 7, 8]],
False),
]
for *q, a in qa:
for i, e in enumerate(q):
print('input{0}: {1}'.format(i + 1, e))
ans = f(*q)
if ans != a:
print(' [failed]')
print(' output:', ans)
print(' expected:', a)
else:
print(' [ok]')
print(' output:', ans)
print()
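# The harness above imports sudoku from solution1, which is not shown here.
# A minimal sketch of a validator consistent with the expected outputs:
# every row, column, and 3x3 box must contain the digits 1..9 exactly once.
def sudoku(grid):
    digits = set(range(1, 10))
    rows = grid
    cols = [[grid[r][c] for r in range(9)] for c in range(9)]
    boxes = [[grid[3 * br + r][3 * bc + c] for r in range(3) for c in range(3)]
             for br in range(3) for bc in range(3)]
    return all(set(group) == digits for group in rows + cols + boxes)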
| 30.6
| 47
| 0.278783
| 950
| 3,978
| 1.167368
| 0.038947
| 0.144274
| 0.213706
| 0.281335
| 0.818756
| 0.814247
| 0.803427
| 0.778179
| 0.756537
| 0.692516
| 0
| 0.38654
| 0.417295
| 3,978
| 129
| 48
| 30.837209
| 0.09189
| 0.00176
| 0
| 0.664
| 0
| 0
| 0.016125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008
| 0
| 0.008
| 0.056
| 0
| 0
| 1
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
669e83ffefa3d93435adb82db21886f73e81c79d
| 7,311
|
py
|
Python
|
global/commands/idl.py
|
hidaruma/caty
|
f71d2ab0a001ea4f7a96a6e02211187ebbf54773
|
[
"MIT"
] | null | null | null |
global/commands/idl.py
|
hidaruma/caty
|
f71d2ab0a001ea4f7a96a6e02211187ebbf54773
|
[
"MIT"
] | null | null | null |
global/commands/idl.py
|
hidaruma/caty
|
f71d2ab0a001ea4f7a96a6e02211187ebbf54773
|
[
"MIT"
] | null | null | null |
from reification import *
import os
class GenerateFacilityClass(Command):
def execute(self, cls_data):
if not cls_data.get('anno', {}).get('__signature', False):
throw_caty_exception(u'InvalidInput', u'$$.anno.__signature=$val', val=cls_data.get('anno', {}).get('__signature', u'undefined'))
name = cls_data['name']
app_name = cls_data['location']['app'].strip('::')
module_name = (cls_data['location']['pkg'] + cls_data['location']['mod']).strip(':')
reifier = ShallowReifier()
system = self.current_app._system
app = system.get_app(app_name)
module = app._schema_module.get_module(module_name)
cls = module.get_class(name)
r = []
for c in cls.command_ns.values():
r.append(reifier.reify_command(c))
src = u'\n'.join(list(self._generate(name, r)))
return {
u'source': src,
u'moduleName': u'interfaces.%s.%s' % (module_name, name)
}
def _generate(self, cls_name, commands):
yield u'from caty.core.spectypes import UNDEFINED'
yield u'from caty.core.facility import Facility, AccessManager'
yield u'\n'
yield u'class %sBase(Facility):' % cls_name
yield u' am = AccessManager()'
for c in commands:
name = c['name'].replace('-', '_')
input_type = c['profile']['input']
arg_type = c['profile']['args']
if input_type.startswith('void'):
input_type = u''
arg_types = arg_type.strip('[]').split(',')
anno = c.get('anno', {})
transactional = True
if anno.get('reader'):
yield u' @am.read'
elif anno.get('updater'):
yield u' @am.update'
else:
transactional = False
if anno.get('static'):
yield u' @classmethod'
a1 = ['cls']
a2 = []
else:
a1 = ['self']
a2 = []
if input_type:
a1.append('input')
a2.append('input')
for i in arg_types:
if not i:
continue
n = i.split(' ')
if n[0].endswith('?'):
a1.append('%s=UNDEFINED' % n[-1].strip(' '))
a2.append(n[-1].strip())
elif n[0].endswith('*'):
a1.append('*%s' % n[-1].strip())
a2.append('*%s' % n[-1].strip())
else:
a1.append(n[-1].strip())
a2.append(n[-1].strip())
if transactional:
if anno.get('static'):
yield u' @classmethod'
yield u' def %s(%s):' % (name, u', '.join(a1))
yield u' return cls._%s(%s)' % (name, u', '.join(a2))
else:
yield u' def %s(%s):' % (name, u', '.join(a1))
yield u' return self._%s(%s)' % (name, u', '.join(a2))
yield u''
yield u' def _%s(%s):' % (name, u', '.join(a1))
yield u' raise NotImplementedError(u"%s._%s")' % ('self.__class__.__name__', name)
yield u''
else:
yield u' def %s(%s):' % (name, u', '.join(a1))
yield u' raise NotImplementedError(u"%s.%s")' % ('self.__class__.__name__', name)
yield u''
class GeneratePyClass(Command):
def execute(self, cls_data):
if not cls_data.get('anno', {}).get('__signature', False):
throw_caty_exception(u'InvalidInput', u'$$.anno.__signature=$val', val=cls_data.get('anno', {}).get('__signature', u'undefined'))
name = cls_data['name']
app_name = cls_data['location']['app'].strip('::')
module_name = (cls_data['location']['pkg'] + cls_data['location']['mod']).strip(':')
reifier = ShallowReifier()
system = self.current_app._system
app = system.get_app(app_name)
module = app._schema_module.get_module(module_name)
cls = module.get_class(name)
r = []
for c in cls.command_ns.values():
r.append(reifier.reify_command(c))
src = u'\n'.join(list(self._generate(name, r)))
return {
u'source': src,
u'moduleName': u'interfaces.%s.%s' % (module_name, name)
}
def _generate(self, cls_name, commands):
yield u'\n'
yield u'class %sBase(object):' % cls_name
for c in commands:
name = c['name'].replace('-', '_')
input_type = c['profile']['input']
arg_type = c['profile']['args']
if input_type.startswith('void'):
input_type = u''
arg_types = arg_type.strip('[]').split(',')
anno = c.get('anno', {})
transactional = True
if anno.get('static'):
yield u' @classmethod'
a1 = ['cls']
a2 = []
else:
a1 = ['self']
a2 = []
if input_type:
a1.append('input')
a2.append('input')
for i in arg_types:
if not i:
continue
n = i.split(' ')
if n[0].endswith('?'):
a1.append('%s=UNDEFINED' % n[-1].strip(' '))
a2.append(n[-1].strip())
elif n[0].endswith('*'):
a1.append('*%s' % n[-1].strip())
a2.append('*%s' % n[-1].strip())
else:
a1.append(n[-1].strip())
a2.append(n[-1].strip())
if transactional:
if anno.get('static'):
yield u' @classmethod'
yield u' def %s(%s):' % (name, u', '.join(a1))
yield u' return cls._%s(%s)' % (name, u', '.join(a2))
else:
yield u' def %s(%s):' % (name, u', '.join(a1))
yield u' return self._%s(%s)' % (name, u', '.join(a2))
yield u''
yield u' def _%s(%s):' % (name, u', '.join(a1))
yield u' raise NotImplementedError(u"%s._%s")' % (cls_name, name)
yield u''
else:
yield u' def %s(%s):' % (name, u', '.join(a1))
yield u' raise NotImplementedError(u"%s.%s")' % (cls_name, name)
yield u''
class WritePyClass(Command):
def execute(self, data):
pkg_dir = None
chunk = data['moduleName'].split('.')
p = [u'']
for c in chunk[:-1]:
p.append(c)
path = u'/'.join(p)
dir = self.sysfiles.lib.opendir(path)
if not dir.exists:
dir.create()
with self.sysfiles.lib.open(dir.path + '/__init__.py', 'wb') as f:
f.write(u'')
p.append(chunk[-1])
path = u'/'.join(p) + '.py'
with self.sysfiles.lib.open(path, 'wb') as f:
f.write(data['source'])
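# For orientation, a sketch of the source GenerateFacilityClass emits for a
# hypothetical class Foo with a single @reader command get taking one
# optional argument (the class and command are invented for illustration,
# not taken from a real Caty schema):
#
# from caty.core.spectypes import UNDEFINED
# from caty.core.facility import Facility, AccessManager
#
# class FooBase(Facility):
#     am = AccessManager()
#     @am.read
#     def get(self, name=UNDEFINED):
#         return self._get(name)
#
#     def _get(self, name=UNDEFINED):
#         raise NotImplementedError(u"self.__class__.__name__._get")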
| 38.888298
| 141
| 0.448913
| 812
| 7,311
| 3.900246
| 0.139163
| 0.066309
| 0.026524
| 0.026524
| 0.855384
| 0.821282
| 0.821282
| 0.808652
| 0.808652
| 0.808652
| 0
| 0.012043
| 0.386678
| 7,311
| 187
| 142
| 39.096257
| 0.694246
| 0
| 0
| 0.782353
| 1
| 0
| 0.172367
| 0.029001
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.023529
| 0
| 0.082353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
66b9aab1fbbf699d33eaae101fdbfc7c60a8d804
| 4,350
|
py
|
Python
|
tests/test_async_ws_consumer.py
|
LittlePony/django-channels2-jsonrpc
|
4b61358bb17731efb31fc8bd5b95abbd8a72ebc5
|
[
"MIT"
] | 3
|
2020-05-25T17:49:00.000Z
|
2021-06-28T12:06:57.000Z
|
tests/test_async_ws_consumer.py
|
LittlePony/django-channels2-jsonrpc
|
4b61358bb17731efb31fc8bd5b95abbd8a72ebc5
|
[
"MIT"
] | null | null | null |
tests/test_async_ws_consumer.py
|
LittlePony/django-channels2-jsonrpc
|
4b61358bb17731efb31fc8bd5b95abbd8a72ebc5
|
[
"MIT"
] | null | null | null |
import pytest
from channels.testing import WebsocketCommunicator
from channels_jsonrpc import AsyncJsonRpcWebsocketConsumer
from .conftest import INVALID_PARAMS, VALID_PARAMS
@pytest.mark.parametrize('test_input,expected', INVALID_PARAMS)
@pytest.mark.asyncio
async def test_async_consumer_errors_handled(test_input, expected):
"""
    Tests that AsyncJsonRpcWebsocketConsumer handles malformed requests correctly.
"""
results = {}
class TestConsumer(AsyncJsonRpcWebsocketConsumer):
async def connect(self):
await self.accept()
async def receive_json(self, data=None, **kwargs):
results['received'] = data
await super().receive_json(data, **kwargs)
@TestConsumer.rpc_method()
async def registered_ping(param):
return 'pong'
@TestConsumer.rpc_method()
async def registered_ping_named(param=None):
return 'pong'
app = TestConsumer()
# Open a connection
communicator = WebsocketCommunicator(app, '/ws/')
connected, _ = await communicator.connect()
assert connected
# Test sending malformed rpc
await communicator.send_json_to(test_input)
response = await communicator.receive_json_from()
assert response == expected
assert results['received'] == test_input
@pytest.mark.parametrize('test_input,expected', VALID_PARAMS)
@pytest.mark.asyncio
async def test_async_json_websocket_consumer(test_input, expected):
"""
    Tests that AsyncJsonRpcWebsocketConsumer is implemented correctly.
"""
results = {}
class TestConsumer(AsyncJsonRpcWebsocketConsumer):
async def connect(self):
await self.accept()
async def receive_json(self, data=None, **kwargs):
results['received'] = data
await super().receive_json(data, **kwargs)
@TestConsumer.rpc_method()
async def registered_ping(param):
return 'pong'
@TestConsumer.rpc_method()
async def registered_ping_named(param=None):
return 'pong'
@TestConsumer.rpc_method()
async def registered_ping_bound(param, **kwargs):
consumer = kwargs['consumer']
return consumer.__class__.__name__
app = TestConsumer()
# Open a connection
communicator = WebsocketCommunicator(app, '/ws/')
connected, _ = await communicator.connect()
assert connected
# Test sending valid rpc
await communicator.send_json_to(test_input)
response = await communicator.receive_json_from()
assert response == expected
assert results['received'] == test_input
# @pytest.mark.asyncio
# async def test_async_json_websocket_consumer():
# """
# Tests that AsyncJsonWebsocketConsumer is implemented correctly.
# """
# results = {}
#
# class TestConsumer(AsyncJsonRpcWebsocketConsumer):
# async def connect(self):
# await self.accept()
#
# async def receive_json(self, data=None, **kwargs):
# results['received'] = data
# await super().receive_json(data, **kwargs)
#
# @TestConsumer.rpc_method()
# async def registered_ping(param):
# return 'pong'
#
# @TestConsumer.rpc_method()
# async def registered_ping_named(param=None):
# return 'pong'
#
# @TestConsumer.rpc_method()
# async def registered_ping_bound(param, **kwargs):
# consumer = kwargs['consumer']
# return consumer.__class__.__name__
#
# app = TestConsumer()
#
# # Open a connection
# communicator = WebsocketCommunicator(app, '/ws/')
# connected, _ = await communicator.connect()
# assert connected
# # Test sending malformed rpc (single param) TODO: exposed internal error message
# request = {'id': 1, 'jsonrpc': '2.0', 'method': 'registered_ping', 'params': [1, 2]}
# await communicator.send_json_to(request)
# response = await communicator.receive_json_from()
# assert response == 'pong'
# assert results['received'] == request
# # Test sending rpc (single named param with invalid name) TODO: exposed internal error message
# request = {'id': 1, 'jsonrpc': '2.0', 'method': 'registered_ping_named', 'params': {'a': 1}}
# await communicator.send_json_to(request)
# response = await communicator.receive_json_from()
# assert response == VALID_METHOD
# assert results['received'] == request
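# The parametrized pairs come from .conftest, which is not shown. Under the
# JSON-RPC 2.0 spec they plausibly look like the following (illustrative
# values only, not the project's actual fixtures):
VALID_PARAMS_EXAMPLE = [
    ({'id': 1, 'jsonrpc': '2.0', 'method': 'registered_ping', 'params': ['x']},
     {'id': 1, 'jsonrpc': '2.0', 'result': 'pong'}),
]
INVALID_PARAMS_EXAMPLE = [
    # A request missing "method" should yield an Invalid Request error (-32600).
    ({'id': 2, 'jsonrpc': '2.0'},
     {'id': 2, 'jsonrpc': '2.0',
      'error': {'code': -32600, 'message': 'Invalid Request'}}),
]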
| 31.985294
| 100
| 0.678851
| 457
| 4,350
| 6.262582
| 0.172867
| 0.047519
| 0.0587
| 0.072676
| 0.881202
| 0.881202
| 0.858141
| 0.858141
| 0.842068
| 0.842068
| 0
| 0.002625
| 0.211954
| 4,350
| 135
| 101
| 32.222222
| 0.832264
| 0.396782
| 0
| 0.803571
| 0
| 0
| 0.042289
| 0
| 0
| 0
| 0
| 0.007407
| 0.107143
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dd0cc15d4ad19f363c9eed729985248f2ccb93a0
| 1,427
|
py
|
Python
|
app/models/samples/energyconstructionsetabridged.py
|
tanushree04/energy-model-schema-1
|
ea92b0d300f1bf97633ff19ae64006d0ea2fed21
|
[
"BSD-3-Clause"
] | null | null | null |
app/models/samples/energyconstructionsetabridged.py
|
tanushree04/energy-model-schema-1
|
ea92b0d300f1bf97633ff19ae64006d0ea2fed21
|
[
"BSD-3-Clause"
] | null | null | null |
app/models/samples/energyconstructionsetabridged.py
|
tanushree04/energy-model-schema-1
|
ea92b0d300f1bf97633ff19ae64006d0ea2fed21
|
[
"BSD-3-Clause"
] | null | null | null |
construction_set = {
'type': 'ConstructionSetAbridged',
'name': 'Construction Set',
'wall_set': {
'type': 'WallSetAbridged',
'exterior_construction': 'Exterior Wall ASHRAE 2009'
},
'floor_set': {
'type': 'FloorSetAbridged',
'interior_construction': 'Internal Floor'
},
'roof_ceiling_set': {
'type': 'RoofCeilingSetAbridged',
'exterior_construction': 'Exterior Roof ASHRAE 2009'
},
'aperture_set': {
'type': 'ApertureSetAbridged',
'fixed_window_construction': 'Exterior Window'
}
}
wall_set = {
'type': 'WallSetAbridged',
'exterior_construction': 'Exterior Wall ASHRAE 2009'
}
floor_set = {
'type': 'FloorSetAbridged',
'interior_construction': 'Internal Floor'
}
roof_ceiling_set = {
'type': 'RoofCeilingSetAbridged',
'exterior_construction': 'Exterior Roof ASHRAE 2009'
}
aperture_set = {
'type': 'ApertureSetAbridged',
'fixed_window_construction': 'Exterior Window',
}
door_set = {
'type': 'DoorSet',
'exterior_construction': 'Exterior Door'
}
construction_set_1 = {
'type': 'ConstructionSetAbridged',
'name': 'Construction Set 1',
'wall_set': {
'type': 'WallSetAbridged',
'exterior_construction': 'Exterior Wall ASHRAE 2009'
},
'floor_set': {
'type': 'FloorSetAbridged',
'interior_construction': 'Internal Floor'
}
}
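# Minimal sanity check over the sample payloads above (illustrative; the
# real project validates these against pydantic schema models not shown here):
def assert_type(obj, expected):
    assert obj['type'] == expected, obj['type']

assert_type(construction_set, 'ConstructionSetAbridged')
assert_type(construction_set['wall_set'], 'WallSetAbridged')
assert_type(wall_set, 'WallSetAbridged')
assert_type(door_set, 'DoorSet')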
| 23.393443
| 60
| 0.627891
| 118
| 1,427
| 7.347458
| 0.20339
| 0.096886
| 0.193772
| 0.089965
| 0.905421
| 0.799308
| 0.799308
| 0.799308
| 0.799308
| 0.799308
| 0
| 0.020055
| 0.231254
| 1,427
| 60
| 61
| 23.783333
| 0.770283
| 0
| 0
| 0.461538
| 0
| 0
| 0.583742
| 0.230554
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dd77554204a59e3d9a6f7893dbc38fccbf0a7883
| 197
|
py
|
Python
|
highway_env/envs/__init__.py
|
JadeQG/highway-env
|
93d2e23de5db672f4f0feb67db0c82d0c86119ab
|
[
"MIT"
] | null | null | null |
highway_env/envs/__init__.py
|
JadeQG/highway-env
|
93d2e23de5db672f4f0feb67db0c82d0c86119ab
|
[
"MIT"
] | null | null | null |
highway_env/envs/__init__.py
|
JadeQG/highway-env
|
93d2e23de5db672f4f0feb67db0c82d0c86119ab
|
[
"MIT"
] | 1
|
2020-04-30T02:00:52.000Z
|
2020-04-30T02:00:52.000Z
|
from __future__ import absolute_import
from highway_env.envs.highway_env import HighwayEnv
from highway_env.envs.merge_env import MergeEnv
from highway_env.envs.roundabout_env import RoundaboutEnv
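# Direct-construction sketch (these envs are normally instantiated through
# gym registration elsewhere in the package; default config is assumed):
env = HighwayEnv()
obs = env.reset()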
| 39.4
| 57
| 0.888325
| 29
| 197
| 5.655172
| 0.413793
| 0.243902
| 0.256098
| 0.329268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081218
| 197
| 4
| 58
| 49.25
| 0.906077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
dd8e63902000a34701f53218f968d8b37aee650a
| 3,156
|
py
|
Python
|
pytorch_toolbelt/modules/encoders/timm/hrnet.py
|
azkalot1/pytorch-toolbelt
|
9d7544fa32a6c6588f9f8c4525ba702700ac01cc
|
[
"MIT"
] | 1,281
|
2019-03-17T18:32:39.000Z
|
2022-03-31T03:47:22.000Z
|
pytorch_toolbelt/modules/encoders/timm/hrnet.py
|
azkalot1/pytorch-toolbelt
|
9d7544fa32a6c6588f9f8c4525ba702700ac01cc
|
[
"MIT"
] | 28
|
2019-04-05T10:49:25.000Z
|
2022-03-11T10:40:28.000Z
|
pytorch_toolbelt/modules/encoders/timm/hrnet.py
|
azkalot1/pytorch-toolbelt
|
9d7544fa32a6c6588f9f8c4525ba702700ac01cc
|
[
"MIT"
] | 99
|
2019-03-18T08:40:18.000Z
|
2022-03-26T10:52:57.000Z
|
from .common import GenericTimmEncoder
from ..common import EncoderModule, _take, make_n_channel_input
from ...activations import ACT_RELU, get_activation_block
__all__ = ["HRNetW18Encoder", "HRNetW32Encoder", "HRNetW48Encoder", "TimmHRNetW18SmallV2Encoder"]
class HRNetW18Encoder(GenericTimmEncoder):
def __init__(self, pretrained=True, use_incre_features: bool = True, layers=None):
from timm.models import hrnet
encoder = hrnet.hrnet_w18(
pretrained=pretrained,
feature_location="incre" if use_incre_features else "",
features_only=True,
out_indices=(0, 1, 2, 3, 4),
)
super().__init__(encoder, layers)
def forward(self, x):
y = self.encoder.forward(x)
return _take(y, self._layers)
def change_input_channels(self, input_channels: int, mode="auto", **kwargs):
self.encoder.conv1 = make_n_channel_input(self.encoder.conv1, input_channels, mode, **kwargs)
return self
class HRNetW32Encoder(GenericTimmEncoder):
def __init__(self, pretrained=True, use_incre_features: bool = True, layers=None):
from timm.models import hrnet
encoder = hrnet.hrnet_w32(
pretrained=pretrained,
feature_location="incre" if use_incre_features else "",
features_only=True,
out_indices=(0, 1, 2, 3, 4),
)
super().__init__(encoder, layers)
def forward(self, x):
y = self.encoder.forward(x)
return _take(y, self._layers)
def change_input_channels(self, input_channels: int, mode="auto", **kwargs):
self.encoder.conv1 = make_n_channel_input(self.encoder.conv1, input_channels, mode, **kwargs)
return self
class HRNetW48Encoder(GenericTimmEncoder):
def __init__(self, pretrained=True, use_incre_features: bool = True, layers=None):
from timm.models import hrnet
encoder = hrnet.hrnet_w48(
pretrained=pretrained,
feature_location="incre" if use_incre_features else "",
features_only=True,
out_indices=(0, 1, 2, 3, 4),
)
super().__init__(encoder, layers)
def forward(self, x):
y = self.encoder.forward(x)
return _take(y, self._layers)
def change_input_channels(self, input_channels: int, mode="auto", **kwargs):
self.encoder.conv1 = make_n_channel_input(self.encoder.conv1, input_channels, mode, **kwargs)
return self
class TimmHRNetW18SmallV2Encoder(GenericTimmEncoder):
def __init__(self, pretrained=True, use_incre_features: bool = True, layers=None, activation=ACT_RELU):
from timm.models import hrnet
encoder = hrnet.hrnet_w18_small_v2(
pretrained=pretrained,
feature_location="incre" if use_incre_features else "",
features_only=True,
out_indices=(0, 1, 2, 3, 4),
)
super().__init__(encoder, layers)
def change_input_channels(self, input_channels: int, mode="auto", **kwargs):
self.encoder.conv1 = make_n_channel_input(self.encoder.conv1, input_channels, mode, **kwargs)
return self
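# Usage sketch (assumes torch and timm are installed; the input size and
# channel count are illustrative):
import torch

encoder = HRNetW18Encoder(pretrained=False)
encoder = encoder.change_input_channels(1)  # e.g. single-channel imagery
features = encoder(torch.randn(2, 1, 256, 256))
for f in features:
    print(f.shape)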
| 36.697674
| 107
| 0.665082
| 373
| 3,156
| 5.327078
| 0.176944
| 0.07851
| 0.064419
| 0.042778
| 0.843986
| 0.843986
| 0.843986
| 0.843986
| 0.843986
| 0.819829
| 0
| 0.022662
| 0.230989
| 3,156
| 85
| 108
| 37.129412
| 0.796045
| 0
| 0
| 0.738462
| 0
| 0
| 0.033904
| 0.008238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169231
| false
| 0
| 0.107692
| 0
| 0.446154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
665577957a42c32cbd5128b3672bde36efdcad20
| 151
|
py
|
Python
|
app/web_ui/views/readme_view.py
|
olderlong/WvsCCServer
|
20f4fdc89f2b6087ebf50044597e3caa375fe0d7
|
[
"Apache-2.0"
] | 1
|
2019-05-09T08:48:26.000Z
|
2019-05-09T08:48:26.000Z
|
app/web_ui/views/readme_view.py
|
olderlong/WvsCCServer
|
20f4fdc89f2b6087ebf50044597e3caa375fe0d7
|
[
"Apache-2.0"
] | null | null | null |
app/web_ui/views/readme_view.py
|
olderlong/WvsCCServer
|
20f4fdc89f2b6087ebf50044597e3caa375fe0d7
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# _*_coding:utf-8 -*_
from flask import render_template
def readme():
    return render_template("readme.html", title="使用说明")  # "使用说明": "Usage instructions"
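# Wiring sketch: the handler above is presumably registered on the app or a
# blueprint elsewhere in app/web_ui; a minimal standalone registration:
from flask import Flask

app = Flask(__name__)
app.add_url_rule("/readme", "readme", readme)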
| 18.875
| 55
| 0.708609
| 21
| 151
| 4.857143
| 0.857143
| 0.27451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 0.139073
| 151
| 8
| 55
| 18.875
| 0.776923
| 0.271523
| 0
| 0
| 0
| 0
| 0.137615
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
b0824ad6a1ddc8bab3399d7b7195698df0d49128
| 12,293
|
py
|
Python
|
data_loader/lb_loader.py
|
hackerekcah/dcase19_task1_hitsplab
|
24579eac72d78951ecd53a6462b32a9504d18a28
|
[
"MIT"
] | 3
|
2019-07-10T07:36:49.000Z
|
2020-01-15T09:14:27.000Z
|
data_loader/lb_loader.py
|
hackerekcah/dcase19_task1_hitsplab
|
24579eac72d78951ecd53a6462b32a9504d18a28
|
[
"MIT"
] | null | null | null |
data_loader/lb_loader.py
|
hackerekcah/dcase19_task1_hitsplab
|
24579eac72d78951ecd53a6462b32a9504d18a28
|
[
"MIT"
] | null | null | null |
from torch.utils.data import DataLoader
from data_loader.transformer import *
from torchvision.transforms import Compose
from data_loader.data_sets import *
class LB_Loader:
def __init__(self, is_divide_variance=True):
self.is_divide_variance = is_divide_variance
def train(self, batch_size=128, shuffle=True):
return DataLoader(dataset=TrainSet(is_divide_variance=self.is_divide_variance,
transform=Compose([TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def val(self, batch_size=128):
return DataLoader(dataset=ValSet(is_divide_variance=self.is_divide_variance,
transform=Compose([TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
def dev(self, batch_size=128, shuffle=True):
return DataLoader(dataset=DevDataSet(is_divide_variance=self.is_divide_variance,
transform=Compose([TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def lb(self, batch_size=128):
return DataLoader(dataset=LB_Dataset(is_divide_variance=self.is_divide_variance,
transform=Compose([TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
class LB_SPL_Loader:
def __init__(self, is_divide_variance=True):
self.is_divide_variance = is_divide_variance
def train(self, batch_size=128, shuffle=True):
return DataLoader(dataset=TrainSet(is_divide_variance=self.is_divide_variance,
transform=Compose([RandomSPL(spl_var=1.0, prob=0.5),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def val(self, batch_size=128):
return DataLoader(dataset=ValSet(is_divide_variance=self.is_divide_variance,
transform=Compose([TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
def dev(self, batch_size=128, shuffle=True):
return DataLoader(dataset=DevDataSet(is_divide_variance=self.is_divide_variance,
transform=Compose([RandomSPL(spl_var=1.0, prob=0.5),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def lb(self, batch_size=128):
return DataLoader(dataset=LB_Dataset(is_divide_variance=self.is_divide_variance,
transform=Compose([TripleChanne1(),
ToTensor()
])),
batch_size=batch_size, shuffle=False, num_workers=3)
class LB_Medfilter_Loader:
def __init__(self, is_divide_variance=True):
self.is_divide_variance = is_divide_variance
def train(self, batch_size=128, shuffle=True):
return DataLoader(dataset=TrainSet(is_divide_variance=self.is_divide_variance,
transform=Compose([MedfilterChannel(width=21, height=7, channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def val(self, batch_size=128):
return DataLoader(dataset=ValSet(is_divide_variance=self.is_divide_variance,
transform=Compose([MedfilterChannel(width=21, height=7, channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
def dev(self, batch_size=128, shuffle=True):
return DataLoader(dataset=DevDataSet(is_divide_variance=self.is_divide_variance,
transform=Compose([MedfilterChannel(width=21, height=7, channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def lb(self, batch_size=128):
return DataLoader(dataset=LB_Dataset(is_divide_variance=self.is_divide_variance,
transform=Compose([MedfilterChannel(width=21, height=7, channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
class LB_MeanSub_Loader:
def __init__(self, is_divide_variance=True):
self.is_divide_variance = is_divide_variance
def train(self, batch_size=128, shuffle=True):
return DataLoader(dataset=TrainSet(is_divide_variance=self.is_divide_variance,
transform=Compose([MeanSubtractionChannel(channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def val(self, batch_size=128):
return DataLoader(dataset=ValSet(is_divide_variance=self.is_divide_variance,
transform=Compose([MeanSubtractionChannel(channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
def dev(self, batch_size=128, shuffle=True):
return DataLoader(dataset=DevDataSet(is_divide_variance=self.is_divide_variance,
transform=Compose([MeanSubtractionChannel(channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def lb(self, batch_size=128):
return DataLoader(dataset=LB_Dataset(is_divide_variance=self.is_divide_variance,
transform=Compose([MeanSubtractionChannel(channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
class Dev_Loader:
def __init__(self, is_divide_variance=True):
self.is_divide_variance = is_divide_variance
def train(self, batch_size=128, shuffle=True):
return DataLoader(dataset=TrainSet(is_divide_variance=self.is_divide_variance,
transform=Compose([TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def val(self, batch_size=128):
bc_val_loader = DataLoader(dataset=DeviceWiseValSet(is_divide_variance=self.is_divide_variance, device='bc',
transform=Compose([TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
a_val_loader = DataLoader(dataset=DeviceWiseValSet(is_divide_variance=self.is_divide_variance, device='a',
transform=Compose([TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
val_loader = {'a': a_val_loader, 'bc': bc_val_loader}
return val_loader
class Dev_Medfilter_Loader:
def __init__(self, is_divide_variance=True):
self.is_divide_variance = is_divide_variance
def train(self, batch_size=128, shuffle=True):
return DataLoader(dataset=TrainSet(is_divide_variance=self.is_divide_variance,
transform=Compose([MedfilterChannel(width=21, height=7, channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def val(self, batch_size=128):
a_val_loader = DataLoader(dataset=DeviceWiseValSet(is_divide_variance=self.is_divide_variance, device='a',
transform=Compose([MedfilterChannel(width=21, height=7,
channel_idx=0),
TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
bc_val_loader = DataLoader(dataset=DeviceWiseValSet(is_divide_variance=self.is_divide_variance, device='bc',
transform=Compose([MedfilterChannel(width=21, height=7,
channel_idx=0),
TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
return {'a': a_val_loader, 'bc': bc_val_loader}
class Dev_MeanSub_Loader:
def __init__(self, is_divide_variance=True):
self.is_divide_variance = is_divide_variance
def train(self, batch_size=128, shuffle=True):
return DataLoader(dataset=TrainSet(is_divide_variance=self.is_divide_variance,
transform=Compose([MeanSubtractionChannel(channel_idx=0),
TripleChanne1(),
ToTensor()])),
batch_size=batch_size, shuffle=shuffle, drop_last=True, num_workers=3)
def val(self, batch_size=128):
a_val_loader = DataLoader(dataset=DeviceWiseValSet(is_divide_variance=self.is_divide_variance, device='a',
transform=Compose([MeanSubtractionChannel(channel_idx=0),
TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
bc_val_loader = DataLoader(dataset=DeviceWiseValSet(is_divide_variance=self.is_divide_variance, device='bc',
transform=Compose([MeanSubtractionChannel(channel_idx=0),
TripleChanne1(), ToTensor()])),
batch_size=batch_size, shuffle=False, num_workers=3)
return {'a': a_val_loader, 'bc': bc_val_loader}
if __name__ == '__main__':
loader = LB_Loader()
dev = loader.train(batch_size=128)
for i, x in enumerate(dev):
print(x.size())
| 61.159204
| 116
| 0.51631
| 1,113
| 12,293
| 5.389039
| 0.06469
| 0.109537
| 0.189396
| 0.130043
| 0.956152
| 0.956152
| 0.956152
| 0.956152
| 0.952151
| 0.952151
| 0
| 0.022059
| 0.402587
| 12,293
| 201
| 117
| 61.159204
| 0.794662
| 0
| 0
| 0.878049
| 0
| 0
| 0.002115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176829
| false
| 0
| 0.02439
| 0.115854
| 0.378049
| 0.006098
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 9
|
b0b93dbde046be26fe91be3f103164df74c458f5
| 564
|
py
|
Python
|
updater.py
|
Cagrimanoka/yokatlas-crawler
|
63ae2a3ca74295fb12876b60d00d4d9f7f58d1fd
|
[
"MIT"
] | 1
|
2021-09-16T20:01:28.000Z
|
2021-09-16T20:01:28.000Z
|
updater.py
|
Cagrimanoka/yokatlas-crawler
|
63ae2a3ca74295fb12876b60d00d4d9f7f58d1fd
|
[
"MIT"
] | null | null | null |
updater.py
|
Cagrimanoka/yokatlas-crawler
|
63ae2a3ca74295fb12876b60d00d4d9f7f58d1fd
|
[
"MIT"
] | null | null | null |
import pickle
with open("assoc_data_pickle", "rb") as f:
data = pickle.load(f)
with open("assoc_data_pickle_new", "rb") as f:
new_data = pickle.load(f)
data.update(new_data)
with open("assoc_data_pickle_updated", "wb") as f:
pickle.dump(data, f, protocol=4)
with open("bachelor_data_pickle", "rb") as f:
data = pickle.load(f)
with open("bachelor_data_pickle_new", "rb") as f:
new_data = pickle.load(f)
data.update(new_data)
with open("bachelor_data_pickle_updated", "wb") as f:
pickle.dump(data, f, protocol=4)
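# The two blocks above repeat one merge pattern; a factored sketch:
def merge_pickles(base_path, new_path, out_path):
    with open(base_path, "rb") as f:
        data = pickle.load(f)
    with open(new_path, "rb") as f:
        data.update(pickle.load(f))
    with open(out_path, "wb") as f:
        pickle.dump(data, f, protocol=4)

merge_pickles("assoc_data_pickle", "assoc_data_pickle_new",
              "assoc_data_pickle_updated")
merge_pickles("bachelor_data_pickle", "bachelor_data_pickle_new",
              "bachelor_data_pickle_updated")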
| 24.521739
| 54
| 0.66844
| 92
| 564
| 3.880435
| 0.195652
| 0.280112
| 0.056022
| 0.168067
| 0.966387
| 0.812325
| 0.812325
| 0.812325
| 0.812325
| 0.812325
| 0
| 0.004357
| 0.18617
| 564
| 23
| 55
| 24.521739
| 0.77342
| 0
| 0
| 0.533333
| 0
| 0
| 0.270718
| 0.180479
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b0f6d7be799b621c7e3555ed7e720763e33b45a4
| 729
|
py
|
Python
|
example/src/functions.py
|
BuvinJT/pyMkDocs
|
e6044ba64519e778f9acb0225dd4fd3c7e53188f
|
[
"MIT"
] | null | null | null |
example/src/functions.py
|
BuvinJT/pyMkDocs
|
e6044ba64519e778f9acb0225dd4fd3c7e53188f
|
[
"MIT"
] | null | null | null |
example/src/functions.py
|
BuvinJT/pyMkDocs
|
e6044ba64519e778f9acb0225dd4fd3c7e53188f
|
[
"MIT"
] | null | null | null |
#import numpy as np
MIN_SIZE=-1
"""This is the minimum size allowed!"""
_pro_min_size=-2
__pri_min_size=-3
def mini(x, y):
"""
    Take the min between two numbers
**Parameters**
> **x:** `float` -- Description of parameter `x`.
> **y:** `float` -- Description of parameter `y`.
**Returns**
> `float` -- Description of returned object.
"""
#return np.min(x, y)
return y
def mini2peutetre(x, y):
"""
    Take the min between two numbers
**Parameters**
> **x:** `float` -- Description of parameter `x`.
> **y:** `float` -- Description of parameter `y`.
**Returns**
> `float` -- Description of returned object.
"""
#return np.min(x, y)
return y
| 16.953488
| 53
| 0.569273
| 93
| 729
| 4.376344
| 0.376344
| 0.029484
| 0.265356
| 0.265356
| 0.766585
| 0.766585
| 0.766585
| 0.766585
| 0.766585
| 0.766585
| 0
| 0.007421
| 0.260631
| 729
| 42
| 54
| 17.357143
| 0.747681
| 0.652949
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
b019c674e13f9adc3c0e65532ae6b544a19195aa
| 9,201
|
py
|
Python
|
HW4-keyvan-dadashzadeh-97522148/Q4/gen/Q4Listener.py
|
keyvan1379/IUSTCompiler
|
ca7f03fe23d0322335a2e8a333967d5905ac3345
|
[
"MIT"
] | null | null | null |
HW4-keyvan-dadashzadeh-97522148/Q4/gen/Q4Listener.py
|
keyvan1379/IUSTCompiler
|
ca7f03fe23d0322335a2e8a333967d5905ac3345
|
[
"MIT"
] | null | null | null |
HW4-keyvan-dadashzadeh-97522148/Q4/gen/Q4Listener.py
|
keyvan1379/IUSTCompiler
|
ca7f03fe23d0322335a2e8a333967d5905ac3345
|
[
"MIT"
] | null | null | null |
# Generated from Q4.g4 by ANTLR 4.9
from antlr4 import *
if __name__ is not None and "." in __name__:
from .Q4Parser import Q4Parser
else:
from Q4Parser import Q4Parser
# This class defines a complete listener for a parse tree produced by Q4Parser.
class Q4Listener(ParseTreeListener):
# Enter a parse tree produced by Q4Parser#program.
def enterProgram(self, ctx:Q4Parser.ProgramContext):
pass
# Exit a parse tree produced by Q4Parser#program.
def exitProgram(self, ctx:Q4Parser.ProgramContext):
pass
# Enter a parse tree produced by Q4Parser#mainClass.
def enterMainClass(self, ctx:Q4Parser.MainClassContext):
pass
# Exit a parse tree produced by Q4Parser#mainClass.
def exitMainClass(self, ctx:Q4Parser.MainClassContext):
pass
# Enter a parse tree produced by Q4Parser#classDeclaration.
def enterClassDeclaration(self, ctx:Q4Parser.ClassDeclarationContext):
pass
# Exit a parse tree produced by Q4Parser#classDeclaration.
def exitClassDeclaration(self, ctx:Q4Parser.ClassDeclarationContext):
pass
# Enter a parse tree produced by Q4Parser#varDeclaration.
def enterVarDeclaration(self, ctx:Q4Parser.VarDeclarationContext):
pass
# Exit a parse tree produced by Q4Parser#varDeclaration.
def exitVarDeclaration(self, ctx:Q4Parser.VarDeclarationContext):
pass
# Enter a parse tree produced by Q4Parser#methodDeclaration.
def enterMethodDeclaration(self, ctx:Q4Parser.MethodDeclarationContext):
pass
# Exit a parse tree produced by Q4Parser#methodDeclaration.
def exitMethodDeclaration(self, ctx:Q4Parser.MethodDeclarationContext):
pass
# Enter a parse tree produced by Q4Parser#type.
def enterType(self, ctx:Q4Parser.TypeContext):
pass
# Exit a parse tree produced by Q4Parser#type.
def exitType(self, ctx:Q4Parser.TypeContext):
pass
# Enter a parse tree produced by Q4Parser#state_if.
def enterState_if(self, ctx:Q4Parser.State_ifContext):
pass
# Exit a parse tree produced by Q4Parser#state_if.
def exitState_if(self, ctx:Q4Parser.State_ifContext):
pass
# Enter a parse tree produced by Q4Parser#state_brace.
def enterState_brace(self, ctx:Q4Parser.State_braceContext):
pass
# Exit a parse tree produced by Q4Parser#state_brace.
def exitState_brace(self, ctx:Q4Parser.State_braceContext):
pass
# Enter a parse tree produced by Q4Parser#state_while.
def enterState_while(self, ctx:Q4Parser.State_whileContext):
pass
# Exit a parse tree produced by Q4Parser#state_while.
def exitState_while(self, ctx:Q4Parser.State_whileContext):
pass
# Enter a parse tree produced by Q4Parser#state_println.
def enterState_println(self, ctx:Q4Parser.State_printlnContext):
pass
# Exit a parse tree produced by Q4Parser#state_println.
def exitState_println(self, ctx:Q4Parser.State_printlnContext):
pass
# Enter a parse tree produced by Q4Parser#state_equal_assign.
def enterState_equal_assign(self, ctx:Q4Parser.State_equal_assignContext):
pass
# Exit a parse tree produced by Q4Parser#state_equal_assign.
def exitState_equal_assign(self, ctx:Q4Parser.State_equal_assignContext):
pass
# Enter a parse tree produced by Q4Parser#state_access_array_assign.
def enterState_access_array_assign(self, ctx:Q4Parser.State_access_array_assignContext):
pass
# Exit a parse tree produced by Q4Parser#state_access_array_assign.
def exitState_access_array_assign(self, ctx:Q4Parser.State_access_array_assignContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_useless_4.
def enterExpr_term_useless_4(self, ctx:Q4Parser.Expr_term_useless_4Context):
pass
# Exit a parse tree produced by Q4Parser#expr_term_useless_4.
def exitExpr_term_useless_4(self, ctx:Q4Parser.Expr_term_useless_4Context):
pass
# Enter a parse tree produced by Q4Parser#expr_term_useless_5.
def enterExpr_term_useless_5(self, ctx:Q4Parser.Expr_term_useless_5Context):
pass
# Exit a parse tree produced by Q4Parser#expr_term_useless_5.
def exitExpr_term_useless_5(self, ctx:Q4Parser.Expr_term_useless_5Context):
pass
# Enter a parse tree produced by Q4Parser#expr_term_useless_2.
def enterExpr_term_useless_2(self, ctx:Q4Parser.Expr_term_useless_2Context):
pass
# Exit a parse tree produced by Q4Parser#expr_term_useless_2.
def exitExpr_term_useless_2(self, ctx:Q4Parser.Expr_term_useless_2Context):
pass
# Enter a parse tree produced by Q4Parser#expr_term_useless_3.
def enterExpr_term_useless_3(self, ctx:Q4Parser.Expr_term_useless_3Context):
pass
# Exit a parse tree produced by Q4Parser#expr_term_useless_3.
def exitExpr_term_useless_3(self, ctx:Q4Parser.Expr_term_useless_3Context):
pass
# Enter a parse tree produced by Q4Parser#expr_term_useless_1.
def enterExpr_term_useless_1(self, ctx:Q4Parser.Expr_term_useless_1Context):
pass
# Exit a parse tree produced by Q4Parser#expr_term_useless_1.
def exitExpr_term_useless_1(self, ctx:Q4Parser.Expr_term_useless_1Context):
pass
# Enter a parse tree produced by Q4Parser#expr_term_plus.
def enterExpr_term_plus(self, ctx:Q4Parser.Expr_term_plusContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_plus.
def exitExpr_term_plus(self, ctx:Q4Parser.Expr_term_plusContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_false.
def enterExpr_term_false(self, ctx:Q4Parser.Expr_term_falseContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_false.
def exitExpr_term_false(self, ctx:Q4Parser.Expr_term_falseContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_less.
def enterExpr_term_less(self, ctx:Q4Parser.Expr_term_lessContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_less.
def exitExpr_term_less(self, ctx:Q4Parser.Expr_term_lessContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_minus.
def enterExpr_term_minus(self, ctx:Q4Parser.Expr_term_minusContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_minus.
def exitExpr_term_minus(self, ctx:Q4Parser.Expr_term_minusContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_int.
def enterExpr_term_int(self, ctx:Q4Parser.Expr_term_intContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_int.
def exitExpr_term_int(self, ctx:Q4Parser.Expr_term_intContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_paran.
def enterExpr_term_paran(self, ctx:Q4Parser.Expr_term_paranContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_paran.
def exitExpr_term_paran(self, ctx:Q4Parser.Expr_term_paranContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_true.
def enterExpr_term_true(self, ctx:Q4Parser.Expr_term_trueContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_true.
def exitExpr_term_true(self, ctx:Q4Parser.Expr_term_trueContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_not.
def enterExpr_term_not(self, ctx:Q4Parser.Expr_term_notContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_not.
def exitExpr_term_not(self, ctx:Q4Parser.Expr_term_notContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_call_function.
def enterExpr_term_call_function(self, ctx:Q4Parser.Expr_term_call_functionContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_call_function.
def exitExpr_term_call_function(self, ctx:Q4Parser.Expr_term_call_functionContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_id.
def enterExpr_term_id(self, ctx:Q4Parser.Expr_term_idContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_id.
def exitExpr_term_id(self, ctx:Q4Parser.Expr_term_idContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_and.
def enterExpr_term_and(self, ctx:Q4Parser.Expr_term_andContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_and.
def exitExpr_term_and(self, ctx:Q4Parser.Expr_term_andContext):
pass
# Enter a parse tree produced by Q4Parser#expr_term_multiply.
def enterExpr_term_multiply(self, ctx:Q4Parser.Expr_term_multiplyContext):
pass
# Exit a parse tree produced by Q4Parser#expr_term_multiply.
def exitExpr_term_multiply(self, ctx:Q4Parser.Expr_term_multiplyContext):
pass
# Enter a parse tree produced by Q4Parser#identifier.
def enterIdentifier(self, ctx:Q4Parser.IdentifierContext):
pass
# Exit a parse tree produced by Q4Parser#identifier.
def exitIdentifier(self, ctx:Q4Parser.IdentifierContext):
pass
del Q4Parser
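# Walking sketch: Q4Lexer is the generated lexer assumed to accompany
# Q4Parser (neither the lexer nor a sample Q4 program is shown here).
from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
from Q4Parser import Q4Parser
from Q4Lexer import Q4Lexer  # hypothetical sibling module

class CountingListener(Q4Listener):
    def enterClassDeclaration(self, ctx):
        print("class declaration found")

tokens = CommonTokenStream(Q4Lexer(InputStream("...")))  # placeholder input
tree = Q4Parser(tokens).program()
ParseTreeWalker.DEFAULT.walk(CountingListener(), tree)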
| 32.62766
| 92
| 0.741115
| 1,240
| 9,201
| 5.255645
| 0.095161
| 0.125211
| 0.166948
| 0.168482
| 0.874022
| 0.802517
| 0.79822
| 0.752647
| 0.646156
| 0.42627
| 0
| 0.021969
| 0.198565
| 9,201
| 282
| 93
| 32.62766
| 0.861812
| 0.371155
| 0
| 0.472441
| 1
| 0
| 0.000177
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.472441
| false
| 0.472441
| 0.023622
| 0
| 0.503937
| 0.015748
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
c65eaffc280fa623802b5bf24b26756f69abc970
| 373
|
py
|
Python
|
utility_scripts/switch_power.py
|
UCT-White-Lab/provisioning-jig
|
d2b8b5738651b9f2885beebe4b50b2cd9d49e973
|
[
"MIT"
] | null | null | null |
utility_scripts/switch_power.py
|
UCT-White-Lab/provisioning-jig
|
d2b8b5738651b9f2885beebe4b50b2cd9d49e973
|
[
"MIT"
] | null | null | null |
utility_scripts/switch_power.py
|
UCT-White-Lab/provisioning-jig
|
d2b8b5738651b9f2885beebe4b50b2cd9d49e973
|
[
"MIT"
] | null | null | null |
import usb.core
def pOff(pNum):
    hubs = usb.core.find(find_all=True, bDeviceClass=usb.CLASS_HUB)
    dev_hub = next(hubs)  # find_all returns an iterator; .next() was Python 2 only
    dev_hub.ctrl_transfer((usb.TYPE_CLASS | usb.RECIP_OTHER), 1, 8, pNum)
def pOn(pNum):
    hubs = usb.core.find(find_all=True, bDeviceClass=usb.CLASS_HUB)
    dev_hub = next(hubs)
    dev_hub.ctrl_transfer((usb.TYPE_CLASS | usb.RECIP_OTHER), 3, 8, pNum)
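# Usage sketch: power-cycle port 2 of the first hub found (requires the
# legacy pyusb constants used above and typically root privileges):
import time

pOff(2)
time.sleep(1)
pOn(2)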
| 31.083333
| 73
| 0.729223
| 65
| 373
| 3.969231
| 0.369231
| 0.093023
| 0.085271
| 0.116279
| 0.852713
| 0.852713
| 0.852713
| 0.852713
| 0.852713
| 0.852713
| 0
| 0.012232
| 0.123324
| 373
| 11
| 74
| 33.909091
| 0.776758
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.111111
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c699f02aaba84087610091f90d7dce8150c4a810
| 174
|
py
|
Python
|
mrkt/framework/__init__.py
|
Tefx/Meerkat
|
ad9d4d3973a990406b976998dce9727b40139650
|
[
"MIT"
] | null | null | null |
mrkt/framework/__init__.py
|
Tefx/Meerkat
|
ad9d4d3973a990406b976998dce9727b40139650
|
[
"MIT"
] | null | null | null |
mrkt/framework/__init__.py
|
Tefx/Meerkat
|
ad9d4d3973a990406b976998dce9727b40139650
|
[
"MIT"
] | null | null | null |
from .cluster.pool import Pool
from .platform.local import Hosts
from .platform.AWS import EC2
from .service.docker import ViaSSH
| 34.8
| 44
| 0.827586
| 27
| 174
| 5.333333
| 0.518519
| 0.152778
| 0.236111
| 0.291667
| 0.347222
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006452
| 0.109195
| 174
| 5
| 45
| 34.8
| 0.922581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c6c0cf8e38240787623b6d28a8dcb2d8f441df79
| 7,442
|
py
|
Python
|
notifierlib/channels/jabber.py
|
costastf/notifierlib
|
ddf5d32dd114a55717eb0d267b165db27a655db0
|
[
"MIT"
] | null | null | null |
notifierlib/channels/jabber.py
|
costastf/notifierlib
|
ddf5d32dd114a55717eb0d267b165db27a655db0
|
[
"MIT"
] | 2
|
2017-09-21T15:24:39.000Z
|
2021-07-07T11:17:08.000Z
|
notifierlib/channels/jabber.py
|
costastf/notifierlib
|
ddf5d32dd114a55717eb0d267b165db27a655db0
|
[
"MIT"
] | 1
|
2017-09-20T17:12:26.000Z
|
2017-09-20T17:12:26.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: jabber.py
"""Jabber module file."""
import logging
import sleekxmpp
from notifierlib.notifierlib import Channel
__author__ = '''Costas Tyfoxylos <costas.tyf@gmail.com>, Argiris Gounaris <agounaris@gmail.com>'''
__docformat__ = 'plaintext'
__date__ = '''19-09-2017'''
class XmppClient(sleekxmpp.ClientXMPP): # pylint: disable=too-many-instance-attributes
"""A basic SleekXMPP bot, logs in, sends message, logs out."""
def __init__(self, # pylint: disable=too-many-arguments
user_id,
password,
recipient,
message,
server,
port,
tls=False,
ssl=True,
reattempt=False):
super(XmppClient, self).__init__(user_id, password)
self._logger = logging.getLogger(self.__class__.__name__)
self.recipient = recipient
self.message = message
self.server = server
self.port = port
self.tls = tls
self.ssl = ssl
self.reattempt = reattempt
self.add_event_handler('session_start', self.start)
self.register_plugin('xep_0030') # Service Discovery
self.register_plugin('xep_0199') # XMPP Ping
# Connect to the XMPP server and start processing XMPP stanzas.
if not self.connect((self.server, self.port),
use_tls=self.tls,
use_ssl=self.ssl,
reattempt=self.reattempt):
message = ('Could not connect to '
'{server}:{port}').format(server=self.server,
port=self.port)
self._logger.error(message)
raise SyntaxError(message)
self.process(block=True)
def start(self, event):
"""Start."""
_ = event # noqa
self.send_message(mto=self.recipient,
mbody=self.message,
mtype='chat')
self.disconnect(wait=True)
class XmppGroupClient(sleekxmpp.ClientXMPP): # pylint: disable=too-many-instance-attributes
"""A basic SleekXMPP bot, logs in, sends message, logs out."""
def __init__(self, # pylint: disable=too-many-arguments
user_id,
password,
room,
nickname,
message,
server,
port,
room_password=None,
tls=False,
ssl=True,
reattempt=False):
super(XmppGroupClient, self).__init__(user_id, password)
self._logger = logging.getLogger(self.__class__.__name__)
self.room = room
self.room_password = room_password
self.nickname = nickname
self.message = message
self.server = server
self.port = port
self.tls = tls
self.ssl = ssl
self.reattempt = reattempt
self.add_event_handler('session_start', self.start)
self.register_plugin('xep_0030') # Service Discovery
self.register_plugin('xep_0045') # Multi-User Chat
self.register_plugin('xep_0199') # XMPP Ping
# Connect to the XMPP server and start processing XMPP stanzas.
if not self.connect((self.server, self.port),
use_tls=self.tls,
use_ssl=self.ssl,
reattempt=self.reattempt):
message = ('Could not connect to '
'{server}:{port}').format(server=self.server,
port=self.port)
self._logger.error(message)
raise SyntaxError(message)
self.process(block=True)
def start(self, event):
"""Start."""
_ = event # noqa
self.plugin['xep_0045'].joinMUC(self.room,
self.nickname,
# If a room password is needed, use:
password=self.room_password,
wait=True)
self.send_message(mto=self.room,
mbody=self.message,
mtype='groupchat')
self.disconnect(wait=True)
class Jabber(Channel): # pylint: disable=too-few-public-methods, too-many-instance-attributes
"""Models a channel for Jabber."""
def __init__(self, # pylint: disable=too-many-arguments
name,
user_id,
password,
recipient_id,
server,
port,
tls=False,
ssl=True,
reattempt=False):
super(Jabber, self).__init__(name)
self._logger = logging.getLogger(self.__class__.__name__)
self.user = user_id
self.password = password
self.server = server
self.recipient = recipient_id
self.port = port
self.tls = tls
self.ssl = ssl
self.reattempt = reattempt
def notify(self, **kwargs):
"""Notify."""
message = kwargs.get('message')
try:
_ = XmppClient(self.user, # noqa
self.password,
self.recipient,
message,
self.server,
self.port,
self.tls,
self.ssl,
self.reattempt)
except Exception: # pylint: disable=broad-except
            self._logger.exception('Jabber notification failed')
return False
return True
class JabberGroup(Channel): # pylint: disable=too-few-public-methods, too-many-instance-attributes
"""Models a channel for a Jabber group."""
def __init__(self, # pylint: disable=too-many-arguments
name,
user_id,
password,
room,
nickname,
server,
port,
room_password=None,
tls=False,
ssl=True,
reattempt=False):
super(JabberGroup, self).__init__(name)
self._logger = logging.getLogger(self.__class__.__name__)
self.user = user_id
self.password = password
self.nickname = nickname
self.room = room
self.room_password = room_password
self.server = server
self.port = port
self.tls = tls
self.ssl = ssl
self.reattempt = reattempt
def notify(self, **kwargs):
"""Notify."""
message = kwargs.get('message')
try:
_ = XmppGroupClient(self.user, # noqa
self.password,
self.room,
self.nickname,
message,
self.server,
self.port,
self.room_password,
self.tls,
self.ssl,
self.reattempt)
except Exception: # pylint: disable=broad-except
            self._logger.exception('Jabber group notification failed')
return False
return True
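# Usage sketch (server details are placeholders; sleekxmpp must be installed):
channel = Jabber(name='alerts',
                 user_id='bot@example.com',
                 password='secret',
                 recipient_id='ops@example.com',
                 server='xmpp.example.com',
                 port=5222)
channel.notify(message='backup finished')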
| 35.778846
| 99
| 0.497313
| 687
| 7,442
| 5.199418
| 0.190684
| 0.036394
| 0.035834
| 0.033595
| 0.818309
| 0.771837
| 0.741041
| 0.741041
| 0.741041
| 0.694009
| 0
| 0.007557
| 0.413195
| 7,442
| 207
| 100
| 35.951691
| 0.810396
| 0.129669
| 0
| 0.820809
| 0
| 0
| 0.042297
| 0.006867
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046243
| false
| 0.092486
| 0.017341
| 0
| 0.109827
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
05a2454f50dd6e29f87968abcf2bdd888e944f37
| 89
|
py
|
Python
|
tests/bytecode/mp-tests/class6.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | 303
|
2015-07-11T17:12:55.000Z
|
2018-01-08T03:02:37.000Z
|
tests/bytecode/mp-tests/class6.py
|
roger-/micropython
|
bad2df3e95cd5719099319d71590a79bf6bc4493
|
[
"MIT"
] | 13
|
2016-05-12T16:51:22.000Z
|
2018-01-10T22:33:25.000Z
|
tests/bytecode/mp-tests/class6.py
|
roger-/micropython
|
bad2df3e95cd5719099319d71590a79bf6bc4493
|
[
"MIT"
] | 26
|
2018-01-18T09:15:33.000Z
|
2022-02-07T13:09:14.000Z
|
class A:
def f(self):
pass
class B(A):
def f(self):
super().f()
| 11.125
| 19
| 0.438202
| 14
| 89
| 2.785714
| 0.571429
| 0.205128
| 0.25641
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.393258
| 89
| 7
| 20
| 12.714286
| 0.722222
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
05ff43f83bb2a3df512467ab2b06464a2d6c2682
| 1,577
|
py
|
Python
|
nn_utils.py
|
zabil/AdaSwarm
|
41afe3f39037e4563a538a28805e13d225c45b53
|
[
"MIT"
] | 1
|
2021-04-30T18:04:17.000Z
|
2021-04-30T18:04:17.000Z
|
nn_utils.py
|
zabil/AdaSwarm
|
41afe3f39037e4563a538a28805e13d225c45b53
|
[
"MIT"
] | null | null | null |
nn_utils.py
|
zabil/AdaSwarm
|
41afe3f39037e4563a538a28805e13d225c45b53
|
[
"MIT"
] | 1
|
2021-09-28T07:04:39.000Z
|
2021-09-28T07:04:39.000Z
|
import torch
import torch.nn.functional as F
class CELoss:
def __init__(self, y):
self.y = y
self.fitness = torch.nn.CrossEntropyLoss()
def evaluate(self, x):
# print(x, self.y)
return self.fitness(x, self.y)
class CELossWithPSO(torch.autograd.Function):
@staticmethod
    def forward(ctx, y, y_pred, sum_cr, eta, gbest):
ctx.save_for_backward(y, y_pred)
ctx.sum_cr = sum_cr
ctx.eta = eta
ctx.gbest = gbest
        return F.cross_entropy(y, y_pred)
@staticmethod
def backward(ctx, grad_output):
        yy, yy_pred = ctx.saved_tensors
sum_cr = ctx.sum_cr
eta = ctx.eta
grad_input = torch.neg((sum_cr/eta) * (ctx.gbest - yy))
return grad_input, grad_output, None, None, None
class L1Loss:
def __init__(self, y):
self.y = y
self.fitness = torch.nn.L1Loss()
def evaluate(self, x):
# print(x, self.y)
return self.fitness(x, self.y)
class L1LossWithPSO(torch.autograd.Function):
@staticmethod
    def forward(ctx, y, y_pred, sum_cr, eta, gbest):
ctx.save_for_backward(y, y_pred)
ctx.sum_cr = sum_cr
ctx.eta = eta
ctx.gbest = gbest
        return F.l1_loss(y, y_pred)
@staticmethod
def backward(ctx, grad_output):
        yy, yy_pred = ctx.saved_tensors
sum_cr = ctx.sum_cr
eta = ctx.eta
grad_input = torch.neg((sum_cr/eta) * (ctx.gbest - yy))
return grad_input, grad_output, None, None, None
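# Usage sketch for the custom autograd function: .apply() is how
# torch.autograd.Function subclasses are invoked. Tensor shapes and the
# swarm parameters (sum_cr, eta, gbest) below are illustrative:
logits = torch.randn(4, 3, requires_grad=True)
targets = torch.randint(0, 3, (4,))
gbest = torch.zeros_like(logits)
loss = CELossWithPSO.apply(logits, targets, 0.9, 0.1, gbest)
loss.backward()
print(logits.grad.shape)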
| 30.326923
| 64
| 0.590361
| 222
| 1,577
| 3.995496
| 0.207207
| 0.067644
| 0.040586
| 0.049605
| 0.859076
| 0.859076
| 0.859076
| 0.859076
| 0.859076
| 0.859076
| 0
| 0.00364
| 0.303107
| 1,577
| 52
| 65
| 30.326923
| 0.803458
| 0.020926
| 0
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.045455
| 0.045455
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
af018ab6d91ef3e2f270db20448d8f0d93c875d0
| 181,147
|
py
|
Python
|
src/api/migrations/0001_initial.py
|
WWF-ConsEvidence/mpasocial
|
347c8b92884a73befb19db65ff48c900a545d222
|
[
"MIT"
] | null | null | null |
src/api/migrations/0001_initial.py
|
WWF-ConsEvidence/mpasocial
|
347c8b92884a73befb19db65ff48c900a545d222
|
[
"MIT"
] | 1
|
2021-11-06T20:53:35.000Z
|
2021-11-06T20:53:35.000Z
|
src/api/migrations/0001_initial.py
|
WWF-ConsEvidence/mpasocial
|
347c8b92884a73befb19db65ff48c900a545d222
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2022-02-01 19:59
import api.models.base
from django.conf import settings
import django.contrib.gis.db.models.fields
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('iso_id', models.PositiveSmallIntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='country_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'countries',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='FGD',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('fgdid', models.IntegerField(primary_key=True, serialize=False)),
('fgdcode', models.PositiveSmallIntegerField(default=995)),
('fgdate', models.DateField(blank=True, null=True)),
('fgday', models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(31)])),
('fgmonth', models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(12)])),
('fgyear', models.PositiveSmallIntegerField(blank=True, choices=[(2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], null=True, validators=[api.models.base.MinValueBCValidator(2000), api.models.base.MaxValueBCValidator(2050)])),
('yearmonitoring', models.PositiveSmallIntegerField(choices=[(2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995, validators=[api.models.base.MinValueBCValidator(2000), api.models.base.MaxValueBCValidator(2050)])),
('starttime', models.TimeField(blank=True, null=True)),
('endtime', models.TimeField(blank=True, null=True)),
('maleparticipants', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(100)])),
('femaleparticipants', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(100)])),
('fgroundname', models.TextField(default='995')),
('fgroundboat', models.CharField(default='995', max_length=255)),
('fgroundtime', models.CharField(default='995', max_length=255)),
('fgrounddist', models.CharField(default='995', max_length=255)),
('fgroundsize', models.CharField(default='995', max_length=255)),
('mpaname', models.CharField(default='995', max_length=255)),
('mpaboat', models.CharField(default='995', max_length=255)),
('mpatime', models.CharField(default='995', max_length=255)),
('mpadist', models.CharField(default='995', max_length=255)),
('mpasize', models.CharField(default='995', max_length=255)),
('ntname', models.CharField(default='995', max_length=255)),
('ntboat', models.CharField(default='995', max_length=255)),
('nttime', models.CharField(default='995', max_length=255)),
('ntdist', models.CharField(default='995', max_length=255)),
('ntsize', models.CharField(default='995', max_length=255)),
('mpahistl', models.TextField(default='995')),
('mpahist', models.TextField(default='995')),
('extbnd', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(100)])),
('intbnd', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(100)])),
('bndlandmarks', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndmarkers', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndsigns', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndgovnotice', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndwoutreach', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndaoutreach', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndvoutreach', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndword', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndotheroutreach', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('bndotherspecifyl', models.CharField(default='995', max_length=255)),
('bndotherspecify', models.CharField(default='995', max_length=255)),
('penaltyverbal', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(999)])),
('penaltywritten', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penaltyaccess', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penaltyequipment', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penaltyfines', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penaltyprison', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penaltyother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penaltyotherspecifyl', models.CharField(default='995', max_length=255)),
('penaltyotherspecify', models.CharField(default='995', max_length=255)),
('npenalty', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(7)])),
('verbalsanction', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('physicalsanction', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('monetarysanction', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('conflictl', models.TextField(default='995')),
('conflict', models.TextField(default='995')),
('conflictusertime', models.DecimalField(decimal_places=3, default=995, max_digits=6, validators=[django.core.validators.MinValueValidator(0)])),
('conflictofficialtime', models.DecimalField(decimal_places=3, default=995, max_digits=6, validators=[django.core.validators.MinValueValidator(0)])),
('conflictusercost', models.PositiveIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(1000000000)])),
('conflictofficialcost', models.PositiveIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(1000000000)])),
('conflictuserdist', models.DecimalField(decimal_places=3, default=995, max_digits=6, validators=[django.core.validators.MinValueValidator(0)])),
('conflictofficialdist', models.DecimalField(decimal_places=3, default=995, max_digits=6, validators=[django.core.validators.MinValueValidator(0)])),
('otherinfol', models.TextField(default='995')),
('otherinfo', models.TextField(default='995')),
('otherpeoplel', models.TextField(default='995')),
('otherpeople', models.TextField(default='995')),
('othersourcesl', models.TextField(default='995')),
('othersources', models.TextField(default='995')),
('traditionalgovernancel', models.TextField(default='995')),
('traditionalgovernance', models.TextField(default='995')),
('conflictn', models.CharField(default='995', max_length=255)),
('congroup', models.CharField(default='995', max_length=255)),
('conbtwgroups', models.CharField(default='995', max_length=255)),
('conbtwgroupngov', models.CharField(default='995', max_length=255)),
('congov', models.CharField(default='995', max_length=255)),
('contypemarine', models.CharField(default='995', max_length=255)),
('contypegov', models.CharField(default='995', max_length=255)),
('contypeusers', models.CharField(default='995', max_length=255)),
('contyperec', models.CharField(default='995', max_length=255)),
('contypeother', models.CharField(default='995', max_length=255)),
('contypeotherspecifyl', models.TextField(default='995')),
('contypeotherspecify', models.TextField(default='995')),
('notesl', models.TextField(default='995')),
('notes', models.TextField(default='995')),
('qaqcnotes', models.TextField(default='995')),
],
options={
'verbose_name': 'FGD',
'verbose_name_plural': 'FGDs',
},
),
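# Illustrative note (an assumption): MinValueBCValidator / MaxValueBCValidator
# are custom validators from api.models.base. Given their use alongside
# default=995 and the 993-999 choice codes, they presumably act like Django's
# Min/MaxValueValidator while exempting the blank-code sentinels, e.g. roughly:
#
#     class MaxValueBCValidator(MaxValueValidator):
#         def __call__(self, value):
#             if value not in (993, 994, 995, 996, 997, 998, 999):
#                 super().__call__(value)
#
# This sketch is hypothetical; the real implementation lives in api/models/base.py.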
migrations.CreateModel(
name='Household',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('householdid', models.IntegerField(primary_key=True, serialize=False)),
('kkcode', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(999)])),
('yearmonitoring', models.PositiveSmallIntegerField(choices=[(2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995, validators=[api.models.base.MinValueBCValidator(2000), api.models.base.MaxValueBCValidator(2050)])),
('interviewdate', models.DateField(blank=True, null=True)),
('interviewstart', models.TimeField(blank=True, null=True)),
('interviewend', models.TimeField(blank=True, null=True)),
('interviewlength', models.TimeField(blank=True, null=True)),
('usualfish', models.CharField(default='995', max_length=255)),
('householdsize', models.PositiveSmallIntegerField(default=995)),
('yearsresident', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(150)])),
('primarymarketname', models.CharField(default='995', max_length=255)),
('secondarymarketname', models.CharField(default='995', max_length=255)),
('timemarket', models.DecimalField(decimal_places=3, default=995, max_digits=6, validators=[django.core.validators.MinValueValidator(0)])),
('timesecondarymarket', models.DecimalField(decimal_places=3, default=995, max_digits=6, validators=[django.core.validators.MinValueValidator(0)])),
('paternalethnicity', models.CharField(default='995', max_length=255)),
('maternalethnicity', models.CharField(default='995', max_length=255)),
('religion', models.PositiveSmallIntegerField(choices=[(1, 'Kristen / Christian'), (2, 'Islam / Muslim'), (3, 'Hindu / Hindu'), (4, 'Budha / Buddhist'), (5, 'Yahudi / Jewish'), (6, 'Kepercataan Tradisional / Traditional Beliefs'), (7, 'Atheis / Atheist'), (8, 'Katolik / Catholic'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('primarylivelihoodyear', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('secondarylivelihoodyear', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('tertiarylivelihoodyear', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('lessproductivedaysfishing', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(365)])),
('poorcatch', models.PositiveIntegerField(default=995)),
('poorcatchunits', models.CharField(default='995', max_length=255)),
('poorcatchunitscategory', models.PositiveSmallIntegerField(choices=[(1, 'Tali / Line'), (2, 'Ember / Bucket'), (3, 'Wayah / Wayah (no English translation available)'), (4, 'Ekor / Tail'), (5, 'Loyang / Tray'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('poorfishincome', models.PositiveIntegerField(default=995)),
('poorfishincomeunits', models.CharField(default='995', max_length=255)),
('moreproductivedaysfishing', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(365)])),
('goodcatch', models.PositiveIntegerField(default=995)),
('goodcatchunits', models.CharField(default='995', max_length=255)),
('goodcatchunitscategory', models.PositiveSmallIntegerField(choices=[(1, 'Tali / Line'), (2, 'Ember / Bucket'), (3, 'Wayah / Wayah (no English translation available)'), (4, 'Ekor / Tail'), (5, 'Loyang / Tray'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('goodfishincome', models.PositiveIntegerField(default=995)),
('goodfishincomeunits', models.CharField(default='995', max_length=255)),
('economicstatustrend', models.PositiveSmallIntegerField(choices=[(1, 'Mejadi sangat buruk / Much worse'), (2, 'Menjadi sedikit lebih buruk / Slightly worse'), (3, 'Tidak berubah / No change'), (4, 'Menjadi sedikit lebih baik / Slightly better'), (5, 'Menjadi sangat baik / Much better'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('economicstatusreasonl', models.TextField(default='995')),
('economicstatusreason', models.TextField(default='995')),
('economicadjustreasonl', models.TextField(default='995')),
('economicadjustreason', models.TextField(default='995')),
('assetcar', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(30)])),
('assetcaryear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetcarassistanceother', models.CharField(default='995', max_length=255)),
('assettruck', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assettruckyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assettruckassistanceother', models.CharField(default='995', max_length=255)),
('assetcartruck', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetbicycle', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetbicycleyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetbicycleassistanceother', models.CharField(default='995', max_length=255)),
('assetmotorcycle', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetmotorcycleyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetmotorcycleassistanceother', models.CharField(default='995', max_length=255)),
('assetboatnomotor', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetboatnomotoryear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetboatnomotorassistanceother', models.CharField(default='995', max_length=255)),
('assetboatoutboard', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetboatoutboardyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetboatoutboardassistanceother', models.CharField(default='995', max_length=255)),
('assetboatinboard', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetboatinboardyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetboatinboardassistanceother', models.CharField(default='995', max_length=255)),
('assetlandlinephone', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetlandlinephoneyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetlandlinephoneassistanceother', models.CharField(default='995', max_length=255)),
('assetcellphone', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetcellphoneyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetcellphoneassistanceother', models.CharField(default='995', max_length=255)),
('assetphonecombined', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assettv', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assettvyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assettvassistanceother', models.CharField(default='995', max_length=255)),
('assetradio', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetradioyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetradioassistanceother', models.CharField(default='995', max_length=255)),
('assetstereo', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetstereoyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetstereoassistanceother', models.CharField(default='995', max_length=255)),
('assetcd', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetcdyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetcdassistanceother', models.CharField(default='995', max_length=255)),
('assetdvd', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetdvdyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetdvdassistanceother', models.CharField(default='995', max_length=255)),
('assetentertain', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetsatellite', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetsatelliteyear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetsatelliteassistanceother', models.CharField(default='995', max_length=255)),
('assetgenerator', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(50)])),
('assetgeneratoryear', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1900), api.models.base.MaxValueBCValidator(2022)])),
('assetgeneratorassistanceother', models.CharField(default='995', max_length=255)),
('cookingfuel', models.PositiveSmallIntegerField(choices=[(1, 'Listrik/Gas / Electricity or gas'), (2, 'Minyak/Minyak Tanah / Oil'), (3, 'Kayu / Wood'), (4, 'Arang / Charcoal'), (5, 'Kayu ranting atau serpihan kayu / Small sticks/scrap wood'), (6, ' Serasah, daun, biogas / Weeds, leaves, dung'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('householddeath', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('householdbirth', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsnotenough', models.PositiveSmallIntegerField(choices=[(1, 'Sering / Often true'), (2, 'Kadang-kadang / Sometimes true'), (3, 'Tidak pernah / Never true'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsdidnotlast', models.PositiveSmallIntegerField(choices=[(1, 'Sering / Often true'), (2, 'Kadang-kadang / Sometimes true'), (3, 'Tidak pernah / Never true'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsbalanceddiet', models.PositiveSmallIntegerField(choices=[(1, 'Sering / Often true'), (2, 'Kadang-kadang / Sometimes true'), (3, 'Tidak pernah / Never true'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsadultskip', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsfreqadultskip', models.PositiveSmallIntegerField(choices=[(1, 'Hampir setiap bulan / Almost every month'), (2, 'Beberapa bulan tetapi tidak setiap bulan / Some months but not every month'), (3, 'Hanya satu atau dua bulan / Only one or two months a year'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fseatless', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fshungry', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fschildportion', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fslowcostfood', models.PositiveSmallIntegerField(choices=[(1, 'Sering / Often true'), (2, 'Kadang-kadang / Sometimes true'), (3, 'Tidak pernah / Never true'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fschildskip', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsfreqchildskip', models.PositiveSmallIntegerField(choices=[(1, 'Hampir setiap bulan / Almost every month'), (2, 'Beberapa bulan tetapi tidak setiap bulan / Some months but not every month'), (3, 'Hanya satu atau dua bulan / Only one or two months a year'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsnomealchild', models.PositiveSmallIntegerField(choices=[(1, 'Sering / Often true'), (2, 'Kadang-kadang / Sometimes true'), (3, 'Tidak pernah / Never true'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('rightsaccess', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('rightsharvest', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('rightsmanage', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('rightsexclude', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('rightstransfer', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socialconflict', models.PositiveSmallIntegerField(choices=[(1, 'Sangat meningkat / Greatly Increased'), (2, 'Meningkat / Increased'), (3, 'Tidak ada perubahan / Neither increased nor decreased'), (4, 'Menurun / Decreased'), (5, 'Sangat menurum / Greatly decreased'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('marinegroup', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('numbermarinegroup', models.PositiveSmallIntegerField(default=995)),
('othergroup', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('numberothergroup', models.PositiveSmallIntegerField(default=995)),
('votedistrict', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('votenational', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('numlocalthreat', models.PositiveSmallIntegerField(default=995)),
('numglobalthreat', models.PositiveSmallIntegerField(default=995)),
('numlocalaction', models.PositiveSmallIntegerField(default=995)),
('numglobalaction', models.PositiveSmallIntegerField(default=995)),
('placehappy', models.PositiveSmallIntegerField(choices=[(1, 'Apakah anda sangat tidak setuju / Strongly disagree'), (2, 'Tidak setuju / Disagree'), (3, 'Netral / Neither agree nor disagree'), (4, 'Setuju atau / Agree'), (5, 'Dangat setuju dengan pernyataan ini / Strongly agree'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('placefavourite', models.PositiveSmallIntegerField(choices=[(1, 'Apakah anda sangat tidak setuju / Strongly disagree'), (2, 'Tidak setuju / Disagree'), (3, 'Netral / Neither agree nor disagree'), (4, 'Setuju atau / Agree'), (5, 'Dangat setuju dengan pernyataan ini / Strongly agree'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('placemiss', models.PositiveSmallIntegerField(choices=[(1, 'Apakah anda sangat tidak setuju / Strongly disagree'), (2, 'Tidak setuju / Disagree'), (3, 'Netral / Neither agree nor disagree'), (4, 'Setuju atau / Agree'), (5, 'Dangat setuju dengan pernyataan ini / Strongly agree'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('placebest', models.PositiveSmallIntegerField(choices=[(1, 'Apakah anda sangat tidak setuju / Strongly disagree'), (2, 'Tidak setuju / Disagree'), (3, 'Netral / Neither agree nor disagree'), (4, 'Setuju atau / Agree'), (5, 'Dangat setuju dengan pernyataan ini / Strongly agree'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('placefishhere', models.PositiveSmallIntegerField(choices=[(1, 'Apakah anda sangat tidak setuju / Strongly disagree'), (2, 'Tidak setuju / Disagree'), (3, 'Netral / Neither agree nor disagree'), (4, 'Setuju atau / Agree'), (5, 'Dangat setuju dengan pernyataan ini / Strongly agree'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('placebemyself', models.PositiveSmallIntegerField(choices=[(1, 'Apakah anda sangat tidak setuju / Strongly disagree'), (2, 'Tidak setuju / Disagree'), (3, 'Netral / Neither agree nor disagree'), (4, 'Setuju atau / Agree'), (5, 'Dangat setuju dengan pernyataan ini / Strongly agree'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('primarylivelihoodcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('secondarylivelihoodcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('tertiarylivelihoodcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('freqfishtimecovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('freqsalefishcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('percentincomefishcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('freqeatfishcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('percentproteinfishcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('majorfishtechniquecovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('poorfishincomecovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('goodfishincomecovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsnotenoughcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsdidnotlastcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsbalanceddietcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fseatlesscovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fshungrycovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fschildportioncovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fslowcostfoodcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsfreqchildskipcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fsnomealchildcovid', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('marinegroupcovid', models.TextField(default='995')),
('othergroupcovid', models.TextField(default='995')),
('anyotherinfo', models.TextField(default='995')),
('willingparticipant', models.TextField(default='995')),
('notes', models.TextField(default='995')),
('dataentrycomplete', models.BooleanField(blank=True, null=True)),
('datacheckcomplete', models.BooleanField(blank=True, null=True)),
('worstdaycatch', models.CharField(default='995', max_length=255)),
('worstdaycatchunits', models.CharField(default='995', max_length=255)),
('bestdaycatch', models.CharField(default='995', max_length=255)),
('bestdaycatchunits', models.CharField(default='995', max_length=255)),
('averageincome', models.CharField(default='995', max_length=255)),
('averageincomeunits', models.CharField(default='995', max_length=255)),
('worstincome', models.CharField(default='995', max_length=255)),
('worstincomeunits', models.CharField(default='995', max_length=255)),
('bestincome', models.CharField(default='995', max_length=255)),
('bestincomeunits', models.CharField(default='995', max_length=255)),
('entrycomputeridentifier', models.CharField(default='995', max_length=255)),
('entryhouseholdid', models.IntegerField(blank=True, null=True)),
('pilotreferencecode', models.CharField(default='995', max_length=255)),
('baseline_t2_pairs', models.FloatField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
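# Coding convention used throughout these survey models: 0 = Tidak/No and
# 1 = Ya/Yes, plus the sentinel codes 993 (question not asked in this survey),
# 994 (skipped by survey skip logic), 995 (no data), 996 (other),
# 997 (do not know), 998 (not applicable) and 999 (refused). Char/Text fields
# follow the same convention with the string '995' as their "no data" default,
# and numeric fields default to 995.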
migrations.CreateModel(
name='KII',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('kiiid', models.IntegerField(primary_key=True, serialize=False)),
('kiicode', models.TextField(default='995')),
('keyinformantrole', models.CharField(default='995', max_length=255)),
('kiidate', models.DateField(blank=True, null=True)),
('kiiday', models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(31)])),
('kiimonth', models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(12)])),
('kiiyear', models.PositiveSmallIntegerField(blank=True, choices=[(2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], null=True, validators=[api.models.base.MinValueBCValidator(2000), api.models.base.MaxValueBCValidator(2050)])),
('yearmonitoring', models.PositiveSmallIntegerField(choices=[(2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995, validators=[api.models.base.MinValueBCValidator(2000), api.models.base.MaxValueBCValidator(2050)])),
('starttime', models.TimeField(blank=True, null=True)),
('endtime', models.TimeField(blank=True, null=True)),
('mpahistoryl', models.TextField(default='995')),
('mpahistory', models.TextField(default='995')),
('pilotnzones', models.PositiveSmallIntegerField(default=995)),
('ecozone', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('soczone', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('druleeco', models.PositiveSmallIntegerField(choices=[(1, 'Tidak pernah / Never'), (2, 'Hampir tidak pernah / Rarely'), (3, 'Kadang-kadang / Sometimes'), (4, 'Biasanya / Usually'), (5, 'Selalu / Always'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('drulesoc', models.PositiveSmallIntegerField(choices=[(1, 'Tidak pernah / Never'), (2, 'Hampir tidak pernah / Rarely'), (3, 'Kadang-kadang / Sometimes'), (4, 'Biasanya / Usually'), (5, 'Selalu / Always'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('pilotnestedness', models.CharField(default='995', max_length=255)),
('rulecomml', models.TextField(default='995')),
('rulecomm', models.TextField(default='995')),
('ruleawarel', models.TextField(default='995')),
('ruleaware', models.TextField(default='995')),
('rulepracticel', models.TextField(default='995')),
('rulepractice', models.TextField(default='995')),
('informalrulel', models.TextField(default='995')),
('informalrule', models.TextField(default='995')),
('ruleparticipationl', models.TextField(default='995')),
('ruleparticipation', models.TextField(default='995')),
('monitorl', models.TextField(default='995')),
('monitor', models.TextField(default='995')),
('penverbal', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penwritten', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penaccess', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penequipment', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penfines', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penincarceration', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penotherspecifyl', models.CharField(default='995', max_length=255)),
('penotherspecify', models.CharField(default='995', max_length=255)),
('penfreq', models.PositiveSmallIntegerField(choices=[(1, 'Tidak pernah / Never'), (2, 'Hampir tidak pernah / Rarely'), (3, 'Kadang-kadang / Sometimes'), (4, 'Biasanya / Usually'), (5, 'Selalu / Always'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penprevious', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('peneco', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penecon', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('pensoc', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penwealth', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penpower', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penstatus', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penfactorother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penfactorotherspecifyl', models.CharField(default='995', max_length=255)),
('penfactorotherspecify', models.CharField(default='995', max_length=255)),
('incened', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('incenskills', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('incenequipment', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('incenpurchase', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('incenloan', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('incenpayment', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('incenemploy', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('incenother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('incenotherspecifyl', models.CharField(default='995', max_length=255)),
('incenotherspecify', models.CharField(default='995', max_length=255)),
('ecomonverbal', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ecomonwritten', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ecomonaccess', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ecomonposition', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ecomonequipment', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ecomonfine', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ecomonincarceration', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ecomonother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ecomonotherspecifyl', models.CharField(default='995', max_length=255)),
('ecomonotherspecify', models.CharField(default='995', max_length=255)),
('socmonverbal', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socmonwritten', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socmonaccess', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socmonposition', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socmonequipment', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socmonfine', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socmonincarceration', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socmonother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('socmonotherspecifyl', models.CharField(default='995', max_length=255)),
('socmonotherspecify', models.CharField(default='995', max_length=255)),
('compmonverbal', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('compmonwritten', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('compmonaccess', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('compmonposition', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('compmonequipment', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('compmonfine', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('compmonincarceration', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('compmonother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('compmonotherspecifyl', models.CharField(default='995', max_length=255)),
('compmonotherspecify', models.CharField(default='995', max_length=255)),
('penmonverbal', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penmonwritten', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penmonaccess', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penmonposition', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penmonequipment', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penmonfine', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penmonincarceration', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penmonother', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('penmonotherspecifyl', models.CharField(default='995', max_length=255)),
('penmonotherspecify', models.CharField(default='995', max_length=255)),
('conflictresl', models.TextField(default='995')),
('conflictres', models.TextField(default='995')),
('ecoimpactl', models.TextField(default='995')),
('ecoimpact', models.TextField(default='995')),
('socimpactl', models.TextField(default='995')),
('socimpact', models.TextField(default='995')),
('contributionl', models.TextField(default='995')),
('contribution', models.TextField(default='995')),
('benefitl', models.TextField(default='995')),
('benefit', models.TextField(default='995')),
('ecoimpactcovidl', models.TextField(default='995')),
('ecoimpactcovid', models.TextField(default='995')),
('socimpactcovidl', models.TextField(default='995')),
('socimpactcovid', models.TextField(default='995')),
('mpaimpactcovidl', models.TextField(default='995')),
('mpaimpactcovid', models.TextField(default='995')),
('anyotherinfol', models.TextField(default='995')),
('anyotherinfo', models.TextField(default='995')),
('anyotherkil', models.TextField(default='995')),
('anyotherki', models.TextField(default='995')),
('anyotherdocsl', models.TextField(default='995')),
('anyotherdocs', models.TextField(default='995')),
('notesl', models.TextField(default='995')),
('notes', models.TextField(default='995')),
('violationfreq', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(999)])),
],
options={
'verbose_name': 'KII',
'verbose_name_plural': 'KIIs',
},
),
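# Free-text KII answers come in pairs such as rulecomml/rulecomm and
# notesl/notes. Judging by the bilingual lookup tables that follow
# (bahasaindonesia/english columns), the trailing-"l" variant most likely
# holds the local-language (Bahasa Indonesia) text and its partner the
# English translation; the migration itself does not document this.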
migrations.CreateModel(
name='LkpFishTechCategory',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lkpfishtechcategory_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'lkp fish tech categories',
'ordering': ('code',),
},
),
migrations.CreateModel(
name='LkpNoneToAllScale',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lkpnonetoallscale_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('code',),
},
),
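# NOTE: foreign keys declared below with default=995 (e.g. Users.userextbnd
# and Users.userintbnd pointing at LkpNoneToAllScale) assume a lookup row
# with code 995 already exists. A follow-up data migration could seed it,
# e.g. (hedged sketch; the function name is hypothetical):
#
#     def seed_nonetoall_codes(apps, schema_editor):
#         LkpNoneToAllScale = apps.get_model('api', 'LkpNoneToAllScale')
#         LkpNoneToAllScale.objects.get_or_create(
#             code=995,
#             defaults={'bahasaindonesia': 'Tidak Ada data', 'english': 'No data'},
#         )
#
#     migrations.RunPython(seed_nonetoall_codes, migrations.RunPython.noop)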
migrations.CreateModel(
name='MPA',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('mpaid', models.IntegerField(primary_key=True, serialize=False)),
('mpaname', models.CharField(max_length=255)),
('wdpaid', models.IntegerField(blank=True, null=True)),
('estyear', models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(2022)], verbose_name='year established')),
('notes', models.TextField(default='995')),
('boundary', django.contrib.gis.db.models.fields.MultiPolygonField(blank=True, geography=True, null=True, srid=4326)),
('size', models.IntegerField(blank=True, null=True, verbose_name='Size (km2)')),
('country', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.Country')),
],
options={
'verbose_name': 'MPA',
'verbose_name_plural': 'MPAs',
'ordering': ('mpaname', 'estyear'),
},
),
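# MPA.boundary is a PostGIS geography column (geography=True, srid=4326,
# i.e. WGS 84 lon/lat), so spatial predicates against it are evaluated on the
# spheroid rather than in planar units. An illustrative point-in-MPA query
# (not part of this migration) might look like:
#
#     from django.contrib.gis.geos import Point
#     MPA.objects.filter(boundary__intersects=Point(130.9, -0.5, srid=4326))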
migrations.CreateModel(
name='MPAInterviewYear',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('year', models.PositiveSmallIntegerField(validators=[django.core.validators.MinValueValidator(2000), django.core.validators.MaxValueValidator(2050)])),
('mpa', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.MPA')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='mpainterviewyear_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'MPA interview year',
},
),
migrations.CreateModel(
name='Zone',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('zoneid', models.IntegerField(primary_key=True, serialize=False)),
('zonetypel', models.CharField(default='995', max_length=255)),
('zonetype', models.CharField(default='995', max_length=255)),
('zonequantity', models.PositiveSmallIntegerField(default=995)),
('zoneorg', models.CharField(default='995', max_length=255)),
('zonecoord', models.PositiveSmallIntegerField(choices=[(1, 'Tidak pernah / Never'), (2, 'Hampir tidak pernah / Rarely'), (3, 'Kadang-kadang / Sometimes'), (4, 'Biasanya / Usually'), (5, 'Selalu / Always'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('notes', models.TextField(default='995')),
('kii', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.KII')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='zone_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
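# Zone (like SpeciesRule below) is protected against orphaning: its kii
# foreign key uses on_delete=PROTECT, so a KII cannot be deleted while
# dependent rows remain. The updated_by audit links instead use SET_NULL,
# so removing a user account neither deletes nor blocks survey data.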
migrations.CreateModel(
name='Users',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('userid', models.IntegerField(primary_key=True, serialize=False)),
('usercode', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(999)])),
('usernamel', models.CharField(default='995', max_length=255)),
('username', models.CharField(default='995', max_length=255)),
('participateestablish', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('participateboundaries', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('participateadmin', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('participaterules', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('monitoreco', models.PositiveSmallIntegerField(choices=[(1, 'Kurang dari satu kali per tahun / Less than one time per year'), (2, 'Beberapa kali per tahun / A few times per year'), (3, 'Beberapa kali per bulan / A few times per month'), (4, 'Beberapa kali per minggu / A few times per week'), (5, 'Lebih dari satu kali sehari / More than once per day'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('monitorsoc', models.PositiveSmallIntegerField(choices=[(1, 'Kurang dari satu kali per tahun / Less than one time per year'), (2, 'Beberapa kali per tahun / A few times per year'), (3, 'Beberapa kali per bulan / A few times per month'), (4, 'Beberapa kali per minggu / A few times per week'), (5, 'Lebih dari satu kali sehari / More than once per day'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('monitorcompliance', models.PositiveSmallIntegerField(choices=[(1, 'Kurang dari satu kali per tahun / Less than one time per year'), (2, 'Beberapa kali per tahun / A few times per year'), (3, 'Beberapa kali per bulan / A few times per month'), (4, 'Beberapa kali per minggu / A few times per week'), (5, 'Lebih dari satu kali sehari / More than once per day'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('enforcefreq', models.PositiveSmallIntegerField(choices=[(1, 'Kurang dari satu kali per tahun / Less than one time per year'), (2, 'Beberapa kali per tahun / A few times per year'), (3, 'Beberapa kali per bulan / A few times per month'), (4, 'Beberapa kali per minggu / A few times per week'), (5, 'Lebih dari satu kali sehari / More than once per day'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('contributionrank', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(25)])),
('benefitrank', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(25)])),
('monitorcovidl', models.TextField(default='995')),
('monitorcovid', models.TextField(default='995')),
('covidassistancel', models.TextField(default='995')),
('covidassistance', models.TextField(default='995')),
('conservationimpactcovidl', models.TextField(default='995')),
('conservationimpactcovid', models.TextField(default='995')),
('fgd', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.FGD')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='users_updated_by', to=settings.AUTH_USER_MODEL)),
('userextbnd', models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='nonetoall_externalboundary', to='api.LkpNoneToAllScale')),
('userintbnd', models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='nonetoall_internalboundary', to='api.LkpNoneToAllScale')),
],
options={
'abstract': False,
},
),
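# Despite its name, this Users model appears to store user-group data
# collected during a focus group (it is keyed by userid and linked to an
# FGD), not Django auth accounts; auth users enter only through the
# updated_by audit foreign keys (settings.AUTH_USER_MODEL).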
migrations.CreateModel(
name='UserProfile',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='profile', serialize=False, to=settings.AUTH_USER_MODEL)),
('mpa_interviewyears', models.ManyToManyField(blank=True, to='api.MPAInterviewYear', verbose_name='MPA interview years')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='userprofile_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Stakeholder',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('stakeholderid', models.IntegerField(primary_key=True, serialize=False)),
('stakeholdernamel', models.CharField(default='995', max_length=255)),
('stakeholdername', models.CharField(default='995', max_length=255)),
('participateestablish', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('participateboundaries', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('participateadmin', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('participaterules', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('monitoreco', models.PositiveSmallIntegerField(choices=[(1, 'Kurang dari satu kali per tahun / Less than one time per year'), (2, 'Beberapa kali per tahun / A few times per year'), (3, 'Beberapa kali per bulan / A few times per month'), (4, 'Beberapa kali per minggu / A few times per week'), (5, 'Lebih dari satu kali sehari / More than once per day'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('monitorsoc', models.PositiveSmallIntegerField(choices=[(1, 'Kurang dari satu kali per tahun / Less than one time per year'), (2, 'Beberapa kali per tahun / A few times per year'), (3, 'Beberapa kali per bulan / A few times per month'), (4, 'Beberapa kali per minggu / A few times per week'), (5, 'Lebih dari satu kali sehari / More than once per day'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('monitorcompliance', models.PositiveSmallIntegerField(choices=[(1, 'Kurang dari satu kali per tahun / Less than one time per year'), (2, 'Beberapa kali per tahun / A few times per year'), (3, 'Beberapa kali per bulan / A few times per month'), (4, 'Beberapa kali per minggu / A few times per week'), (5, 'Lebih dari satu kali sehari / More than once per day'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('enforcefreq', models.PositiveSmallIntegerField(choices=[(1, 'Kurang dari satu kali per tahun / Less than one time per year'), (2, 'Beberapa kali per tahun / A few times per year'), (3, 'Beberapa kali per bulan / A few times per month'), (4, 'Beberapa kali per minggu / A few times per week'), (5, 'Lebih dari satu kali sehari / More than once per day'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('fgd', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='api.FGD')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='stakeholder_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SpeciesRule',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('sppruleid', models.IntegerField(primary_key=True, serialize=False)),
('speciescommonl', models.CharField(default='995', max_length=255)),
('speciescommon', models.CharField(default='995', max_length=255)),
('family', models.CharField(default='995', max_length=255)),
('genus', models.CharField(default='995', max_length=255)),
('species', models.CharField(default='995', max_length=255)),
('spprule', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('sppspecificrulel', models.TextField(default='995')),
('sppspecificrule', models.TextField(default='995')),
('notes', models.TextField(default='995')),
('kii', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.KII')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='speciesrule_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Species',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('speciesid', models.IntegerField(primary_key=True, serialize=False)),
('speciescommonl', models.CharField(default='995', max_length=255)),
('speciescommon', models.CharField(default='995', max_length=255)),
('family', models.CharField(default='995', max_length=255)),
('genus', models.CharField(default='995', max_length=255)),
('species', models.CharField(default='995', max_length=255)),
('fgd', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='api.FGD')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='species_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'species',
},
),
migrations.CreateModel(
name='Settlement',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('settlementid', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('treatment', models.PositiveSmallIntegerField(choices=[(0, 'Control'), (1, 'Treatment'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('districtname', models.CharField(max_length=255)),
('districtcode', models.BigIntegerField(blank=True, default=995)),
('marketname1', models.CharField(default='995', max_length=255)),
('marketname2', models.CharField(default='995', max_length=255)),
('zone', models.CharField(default='995', max_length=255)),
('mpa', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.MPA')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='settlement_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Seascape',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='seascape_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('code',),
},
),
migrations.CreateModel(
name='Rule',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('ruleid', models.IntegerField(primary_key=True, serialize=False)),
('rulecode', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(999)])),
('rulel', models.TextField(default='995')),
('rule', models.TextField(default='995')),
('fgd', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.FGD')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='rule_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Right',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('rightsid', models.IntegerField(primary_key=True, serialize=False)),
('usernamel', models.CharField(default='995', max_length=255)),
('username', models.CharField(default='995', max_length=255)),
('userrule', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('userspecrulel', models.CharField(default='995', max_length=255)),
('userspecrule', models.CharField(default='995', max_length=255)),
('govtsupport', models.PositiveSmallIntegerField(choices=[(1, 'Sangat menentang / Strongly oppose'), (2, 'Menentang / Oppose'), (3, 'Tidak menentang maupun mendukung / Neither oppose nor support'), (4, 'Mendukung / Support'), (5, 'Sangat mendukung / Strongly support'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('userrulesinc', models.PositiveSmallIntegerField(choices=[(1, 'Tidak dimasukkan / Not included'), (2, 'Dimasukkan sebagian / Partially included'), (3, 'Dimasukkan semua / Fully included'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('notes', models.TextField(default='995')),
('kii', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.KII')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='right_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='NonMarineOrganizationMembership',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('nmorganizationid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('name', models.CharField(default='995', max_length=255)),
('position', models.IntegerField(choices=[(1, 'Anggota / Member'), (2, 'Pengurus / Official'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('meeting', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('days', models.PositiveIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(365)])),
('contribution', models.IntegerField(default=995)),
('contributionunits', models.CharField(default='995', max_length=255)),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='nonmarineorganizationmembership_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='MPANetwork',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='mpanetwork_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('code',),
},
),
migrations.AddField(
model_name='mpa',
name='mpanetwork',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.MPANetwork'),
),
migrations.AddField(
model_name='mpa',
name='seascape',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.Seascape'),
),
migrations.AddField(
model_name='mpa',
name='updated_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='mpa_updated_by', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='MonitoringStaff',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('staffid', models.IntegerField(primary_key=True, serialize=False)),
('name', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='monitoringstaff_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'monitoring staff',
},
),
migrations.CreateModel(
name='MarineOrganizationMembership',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('morganizationid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('name', models.CharField(default='995', max_length=255)),
('position', models.IntegerField(choices=[(1, 'Anggota / Member'), (2, 'Pengurus / Official'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('meeting', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('days', models.PositiveIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(365)])),
('contribution', models.IntegerField(default=995)),
('contributionunits', models.CharField(default='995', max_length=255)),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='marineorganizationmembership_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LocalThreat',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('localthreatid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('localmarinethreat', models.CharField(default='995', max_length=255)),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='localthreat_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LocalStep',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('localstepsid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('localsteps', models.CharField(default='995', max_length=255)),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='localstep_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LkpLivelihood',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lkplivelihood_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('code',),
},
),
migrations.CreateModel(
name='LkpFreqFishTime',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lkpfreqfishtime_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('code',),
},
),
migrations.CreateModel(
name='LkpFishTechnique',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('consolidatedfishtechcategory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.LkpFishTechCategory')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lkpfishtechnique_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('code',),
},
),
migrations.CreateModel(
name='LkpAssetObtain',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lkpassetobtain_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('code',),
},
),
migrations.CreateModel(
name='LkpAssetAssistance',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('code', models.IntegerField(primary_key=True, serialize=False)),
('bahasaindonesia', models.CharField(blank=True, max_length=255)),
('english', models.CharField(blank=True, max_length=255)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='lkpassetassistance_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('code',),
},
),
migrations.CreateModel(
name='KIISurveyVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('version', models.CharField(max_length=255)),
('notes', models.TextField(blank=True)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='kiisurveyversion_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('id',),
},
),
migrations.AddField(
model_name='kii',
name='datacheckid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='kii_staff_data_check', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='kii',
name='dataentryid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='kii_staff_data_entry', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='kii',
name='fgd',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.FGD'),
),
migrations.AddField(
model_name='kii',
name='kiiversion',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.KIISurveyVersion'),
),
migrations.AddField(
model_name='kii',
name='primaryinterviewer',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='kii_primaryinterviewer', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='kii',
name='secondaryinterviewer',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='kii_secondaryinterviewer', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='kii',
name='settlement',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Settlement'),
),
migrations.AddField(
model_name='kii',
name='updated_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='kii_updated_by', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='HouseholdSurveyVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('version', models.CharField(max_length=255)),
('notes', models.TextField(blank=True)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='householdsurveyversion_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('id',),
},
),
migrations.AddField(
model_name='household',
name='assetbicycleassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetbicycleassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetbicycleobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetbicycleobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetboatinboardassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetboatinboardassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetboatinboardobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetboatinboardobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetboatnomotorassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetboatnomotorassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetboatnomotorobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetboatnomotorobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetboatoutboardassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetboatoutboardassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetboatoutboardobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetboatoutboardobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetcarassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetcarassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetcarobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetcarobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetcdassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetcdassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetcdobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetcdobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetcellphoneassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetcellphoneassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetcellphoneobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetcellphoneobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetdvdassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetdvdassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetdvdobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetdvdobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetgeneratorassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetgeneratorassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetgeneratorobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetgeneratorobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetlandlinephoneassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetlandlinephoneassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetlandlinephoneobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetlandlinephoneobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetmotorcycleassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetmotorcycleassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetmotorcycleobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetmotorcycleobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetradioassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetradioassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetradioobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetradioobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetsatelliteassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetsatelliteassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetsatelliteobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetsatelliteobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assetstereoassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetstereoassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assetstereoobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assetstereoobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assettruckassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assettruckassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assettruckobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assettruckobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='assettvassistance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assettvassistance', to='api.LkpAssetAssistance'),
),
migrations.AddField(
model_name='household',
name='assettvobtain',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_assettvobtains', to='api.LkpAssetObtain'),
),
migrations.AddField(
model_name='household',
name='datacheckid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_datacheckstaff', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='household',
name='dataentryid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_dataentrystaff', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='household',
name='fieldcoordinator',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='household_fieldcoordinator', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='household',
name='freqeatfish',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='freqfishtime_freqeatfish', to='api.LkpFreqFishTime'),
),
migrations.AddField(
model_name='household',
name='freqfishtime',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='freqfishtime_freqfish', to='api.LkpFreqFishTime'),
),
migrations.AddField(
model_name='household',
name='freqsalefish',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='freqfishtime_freqsalefish', to='api.LkpFreqFishTime'),
),
migrations.AddField(
model_name='household',
name='majorfishtechnique',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, to='api.LkpFishTechCategory'),
),
migrations.AddField(
model_name='household',
name='percentincomefish',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='nonetoall_percentincomefish', to='api.LkpNoneToAllScale'),
),
migrations.AddField(
model_name='household',
name='percentproteinfish',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='nonetoall_percentproteinfish', to='api.LkpNoneToAllScale'),
),
migrations.AddField(
model_name='household',
name='primaryfishtechnique',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='primaryfishtechnique_households', to='api.LkpFishTechnique'),
),
migrations.AddField(
model_name='household',
name='primaryinterviewer',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='household_primaryinterviewer', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='household',
name='primarylivelihood',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='livelihood_primarylivelihood', to='api.LkpLivelihood'),
),
migrations.AddField(
model_name='household',
name='secondaryfishtechnique',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='secondaryfishtechnique_households', to='api.LkpFishTechnique'),
),
migrations.AddField(
model_name='household',
name='secondaryinterviewer',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='household_secondaryinterviewer', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='household',
name='secondarylivelihood',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='livelihood_secondarylivelihood', to='api.LkpLivelihood'),
),
migrations.AddField(
model_name='household',
name='settlement',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Settlement'),
),
migrations.AddField(
model_name='household',
name='surveyversionnumber',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.HouseholdSurveyVersion'),
),
migrations.AddField(
model_name='household',
name='tertiaryfishtechnique',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='tertiaryfishtechnique_households', to='api.LkpFishTechnique'),
),
migrations.AddField(
model_name='household',
name='tertiarylivelihood',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='livelihood_tertiarylivelihood', to='api.LkpLivelihood'),
),
migrations.AddField(
model_name='household',
name='updated_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='household_updated_by', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='HabitatRule',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('habrulesid', models.IntegerField(primary_key=True, serialize=False)),
('habnamel', models.CharField(default='995', max_length=255)),
('habname', models.CharField(default='995', max_length=255)),
('habrule', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('habspecificrulel', models.TextField(default='995')),
('habspecificrule', models.TextField(default='995')),
('notes', models.TextField(default='995')),
('kii', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.KII')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='habitatrule_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Habitat',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('habitatid', models.IntegerField(primary_key=True, serialize=False)),
('habitatcode', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(999)])),
('habitattypel', models.CharField(default='995', max_length=255)),
('habitattype', models.CharField(default='995', max_length=255)),
('fgd', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.FGD')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='habitat_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GlobalThreat',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('globalthreatid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('globalmarinethreat', models.CharField(default='995', max_length=255)),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='globalthreat_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GlobalStep',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('globalstepsid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('globalmarinesteps', models.CharField(default='995', max_length=255)),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='globalstep_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FGDSurveyVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('version', models.CharField(max_length=255)),
('notes', models.TextField(blank=True)),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='fgdsurveyversion_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('id',),
},
),
migrations.AddField(
model_name='fgd',
name='datacheckid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='fgd_staff_data_check', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='fgd',
name='dataentryid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='fgd_staff_data_entry', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='fgd',
name='facilitator',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='fgd_staff_facilitator', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='fgd',
name='fgdversion',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, to='api.FGDSurveyVersion'),
),
migrations.AddField(
model_name='fgd',
name='notetaker',
field=models.ForeignKey(default=995, on_delete=django.db.models.deletion.PROTECT, related_name='fgd_staff_notetaker', to='api.MonitoringStaff'),
),
migrations.AddField(
model_name='fgd',
name='settlement',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Settlement'),
),
migrations.AddField(
model_name='fgd',
name='updated_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='fgd_updated_by', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Demographic',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('demographicid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('demographiccode', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(1), api.models.base.MaxValueBCValidator(999)])),
('relationhhh', models.IntegerField(choices=[(0, 'Kepala keluarga / Head of household'), (1, 'Pasangan (suami/istri) / Spouse'), (2, 'Anak / Child'), (3, 'Ibu/Ayah mertua / Father/Mother in law'), (4, 'Cucu / Grandchild'), (5, 'Orang tua / Parent'), (6, 'Anak mantu or Anak menantu / Child in law'), (7, 'Saudara laki-laki/perempuan / Sibling'), (8, 'Ipar / Sibling in law'), (9, 'Paman/Bibi (Om/Tante) / Uncle or Aunt'), (10, 'Keponakan / Nephew or Niece'), (11, 'Anak tiri or Anak angkat / Foster child'), (12, 'Keluarga lainnya / Other family member'), (13, 'Tidak ada hubungan kekerabatan / Not related to family'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('individualage', models.DecimalField(decimal_places=2, default=995, max_digits=5, validators=[api.models.base.MinValueBCValidator(0), api.models.base.MaxValueBCValidator(150)])),
('individualgender', models.IntegerField(choices=[(1, 'Laki-Laki / Male'), (2, 'Perempuan / Female'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('individualeducation', models.CharField(default='995', max_length=255)),
('individualedlevel', models.IntegerField(choices=[(0, 'Tidak Ada Pendidikan Formal / No Formal Education'), (1, 'Taman Kanak-kanak / Pre-School'), (2, 'Sekolah Dasar (SD) / Primary School'), (3, 'Sekolah Menengah Pertama (SMP) / Middle School'), (4, 'Sekolah Menengah Atas (SMA) dan Sekolah Menengah Kejuruan (SMK) / Secondary School'), (5, 'Ahli Madya Diploma 3 dan lebih tinggi (S1, S2, S3) / Post Secondary School'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('individualenrolled', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('householdhead', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('individualunwell', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('individualdaysunwell', models.PositiveIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(31)])),
('individuallostdays', models.PositiveIntegerField(default=995, validators=[api.models.base.MaxValueBCValidator(31)])),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='demographic_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Death',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('deathid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('gender', models.IntegerField(choices=[(1, 'Laki-Laki / Male'), (2, 'Perempuan / Female'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('ageatdeath', models.DecimalField(decimal_places=2, default=995, max_digits=5, validators=[api.models.base.MinValueBCValidator(0), api.models.base.MaxValueBCValidator(150)])),
('datedeath', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(2000), api.models.base.MaxValueBCValidator(2022)])),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='death_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Birth',
fields=[
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateTimeField(auto_now=True)),
('birthid', models.IntegerField(primary_key=True, serialize=False)),
('entryhouseholdid', models.BigIntegerField(default=995)),
('infantsurvived', models.PositiveSmallIntegerField(choices=[(0, 'Tidak / No'), (1, 'Ya / Yes'), (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'), (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'), (995, 'Tidak Ada data / No data'), (996, 'Lainnya / Other'), (997, 'Tidak tahu / Do not know'), (998, 'Tidak sesuai / Not applicable'), (999, 'Menolak / Refused')], default=995)),
('dateofdeath', models.PositiveSmallIntegerField(default=995, validators=[api.models.base.MinValueBCValidator(2000), api.models.base.MaxValueBCValidator(2022)])),
('household', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='api.Household')),
('updated_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='birth_updated_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
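
Editor's note: nearly every choices list in the migration above repeats the same bilingual fallback codes (993-999). A minimal refactor sketch of how the source models could declare those codes once and reuse them; the names SKIP_CODES, YESNO_CHOICES, and ExampleModel below are hypothetical, not taken from this project:

# Hypothetical sketch, not the project's actual code: define the shared
# 993-999 fallback codes once and append them to each field's options.
from django.db import models

SKIP_CODES = [
    (993, 'Pertanyaan tidak diminta dalam survei ini (tidak ada skip logic) / Question not asked as part of this survey'),
    (994, 'Pertanyaan dilewati berdasarkan skip logic survei / Question skipped based on survey skip logic'),
    (995, 'Tidak Ada data / No data'),
    (996, 'Lainnya / Other'),
    (997, 'Tidak tahu / Do not know'),
    (998, 'Tidak sesuai / Not applicable'),
    (999, 'Menolak / Refused'),
]

YESNO_CHOICES = [(0, 'Tidak / No'), (1, 'Ya / Yes')] + SKIP_CODES

class ExampleModel(models.Model):
    # Same shape as the generated 'spprule' / 'habrule' fields above.
    spprule = models.PositiveSmallIntegerField(choices=YESNO_CHOICES, default=995)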
pyqentangle/cythonmodule/__init__.py | stephenhky/PyQEntangle @ f06b63ac | MIT | 61 bytes | 15 stars
from . import interpolate_nocheck
from . import interpolate
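
Editor's note: this __init__ only re-exports the package's Cython submodules; a minimal usage sketch, assuming the extension modules have been compiled:

# Hedged example -- assumes the Cython extensions are built.
from pyqentangle.cythonmodule import interpolate, interpolate_nocheck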
snapx/snapx/algorithms/__init__.py | averyr2/snap-python @ 8173f128 | BSD-3-Clause | 82 bytes | 1 star
from snapx.algorithms.centrality import *
from snapx.algorithms.community import *
pyaz/cloud/__init__.py | py-az-cli/py-az-cli @ 9a7dc44e | MIT | 5,499 bytes
'''
Manage registered Azure clouds.
'''
from .. pyaz_utils import _call_az
def list():
'''
List registered clouds.
'''
return _call_az("az cloud list", locals())
def show(name=None):
'''
Get the details of a registered cloud.
Optional Parameters:
- name -- Name of a registered cloud.
'''
return _call_az("az cloud show", locals())
def register(name, cloud_config=None, endpoint_active_directory=None, endpoint_active_directory_data_lake_resource_id=None, endpoint_active_directory_graph_resource_id=None, endpoint_active_directory_resource_id=None, endpoint_gallery=None, endpoint_management=None, endpoint_resource_manager=None, endpoint_sql_management=None, endpoint_vm_image_alias_doc=None, profile=None, suffix_acr_login_server_endpoint=None, suffix_azure_datalake_analytics_catalog_and_job_endpoint=None, suffix_azure_datalake_store_file_system_endpoint=None, suffix_keyvault_dns=None, suffix_sql_server_hostname=None, suffix_storage_endpoint=None):
'''
Register a cloud.
Required Parameters:
- name -- Name of a registered cloud
Optional Parameters:
- cloud_config -- JSON encoded cloud configuration. Use @{file} to load from a file.
- endpoint_active_directory -- The Active Directory login endpoint
- endpoint_active_directory_data_lake_resource_id -- The Active Directory resource ID for data lake services
- endpoint_active_directory_graph_resource_id -- The Active Directory resource ID
- endpoint_active_directory_resource_id -- The resource ID to obtain AD tokens for
- endpoint_gallery -- The template gallery endpoint
- endpoint_management -- The management service endpoint
- endpoint_resource_manager -- The resource management endpoint
- endpoint_sql_management -- The sql server management endpoint
- endpoint_vm_image_alias_doc -- The uri of the document which caches commonly used virtual machine images
- profile -- Profile to use for this cloud
- suffix_acr_login_server_endpoint -- The Azure Container Registry login server suffix
- suffix_azure_datalake_analytics_catalog_and_job_endpoint -- The Data Lake analytics job and catalog service dns suffix
- suffix_azure_datalake_store_file_system_endpoint -- The Data Lake store filesystem service dns suffix
- suffix_keyvault_dns -- The Key Vault service dns suffix
- suffix_sql_server_hostname -- The dns suffix for sql servers
- suffix_storage_endpoint -- The endpoint suffix for storage accounts
'''
return _call_az("az cloud register", locals())
def unregister(name):
'''
Unregister a cloud.
Required Parameters:
- name -- Name of a registered cloud
'''
return _call_az("az cloud unregister", locals())
def set(name, profile=None):
'''
Set the active cloud.
Required Parameters:
- name -- Name of a registered cloud
Optional Parameters:
- profile -- Profile to use for this cloud
'''
return _call_az("az cloud set", locals())
def update(cloud_config=None, endpoint_active_directory=None, endpoint_active_directory_data_lake_resource_id=None, endpoint_active_directory_graph_resource_id=None, endpoint_active_directory_resource_id=None, endpoint_gallery=None, endpoint_management=None, endpoint_resource_manager=None, endpoint_sql_management=None, endpoint_vm_image_alias_doc=None, name=None, profile=None, suffix_acr_login_server_endpoint=None, suffix_azure_datalake_analytics_catalog_and_job_endpoint=None, suffix_azure_datalake_store_file_system_endpoint=None, suffix_keyvault_dns=None, suffix_sql_server_hostname=None, suffix_storage_endpoint=None):
'''
Update the configuration of a cloud.
Optional Parameters:
- cloud_config -- JSON encoded cloud configuration. Use @{file} to load from a file.
- endpoint_active_directory -- The Active Directory login endpoint
- endpoint_active_directory_data_lake_resource_id -- The Active Directory resource ID for data lake services
- endpoint_active_directory_graph_resource_id -- The Active Directory resource ID
- endpoint_active_directory_resource_id -- The resource ID to obtain AD tokens for
- endpoint_gallery -- The template gallery endpoint
- endpoint_management -- The management service endpoint
- endpoint_resource_manager -- The resource management endpoint
- endpoint_sql_management -- The sql server management endpoint
- endpoint_vm_image_alias_doc -- The uri of the document which caches commonly used virtual machine images
- name -- Name of a registered cloud.
- profile -- Profile to use for this cloud
- suffix_acr_login_server_endpoint -- The Azure Container Registry login server suffix
- suffix_azure_datalake_analytics_catalog_and_job_endpoint -- The Data Lake analytics job and catalog service dns suffix
- suffix_azure_datalake_store_file_system_endpoint -- The Data Lake store filesystem service dns suffix
- suffix_keyvault_dns -- The Key Vault service dns suffix
- suffix_sql_server_hostname -- The dns suffix for sql servers
- suffix_storage_endpoint -- The endpoint suffix for storage accounts
'''
return _call_az("az cloud update", locals())
def list_profiles(name=None, show_all=None):
'''
List the supported profiles for a cloud.
Optional Parameters:
- name -- Name of a registered cloud.
- show_all -- Show all available profiles supported in the CLI.
'''
return _call_az("az cloud list-profiles", locals())
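
Editor's note: each wrapper above assembles an az command string and forwards its own arguments through _call_az(command, locals()); a minimal usage sketch, assuming the Azure CLI is installed and pyaz is importable ('AzureCloud' is only an illustrative cloud name):

# Hedged example -- requires the `az` CLI on PATH.
from pyaz import cloud

clouds = cloud.list()                     # wraps `az cloud list`
details = cloud.show(name='AzureCloud')   # name is forwarded via locals()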
brreg_announce/__init__.py | anderser/brreg_announce @ 88ca9aaf | MIT | 26 bytes
def version():
return 0.4
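
Editor's note: returning a bare float works for a one-off check but cannot represent a patch release such as 0.4.1; a semver-style string is the more common convention. A hedged alternative sketch, not the package's actual code:

def version():
    # String versions parse cleanly with packaging.version if comparison is needed.
    return "0.4.0"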
google/identity/accesscontextmanager/v1/identity-accesscontextmanager-v1-py/tests/unit/gapic/accesscontextmanager_v1/test_access_context_manager.py | googleapis/googleapis-gen @ d84824c7 | Apache-2.0 | 262,718 bytes | 7 stars
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.identity.accesscontextmanager.type import device_resources_pb2 # type: ignore
from google.identity.accesscontextmanager_v1.services.access_context_manager import AccessContextManagerAsyncClient
from google.identity.accesscontextmanager_v1.services.access_context_manager import AccessContextManagerClient
from google.identity.accesscontextmanager_v1.services.access_context_manager import pagers
from google.identity.accesscontextmanager_v1.services.access_context_manager import transports
from google.identity.accesscontextmanager_v1.services.access_context_manager.transports.base import _GOOGLE_AUTH_VERSION
from google.identity.accesscontextmanager_v1.types import access_context_manager
from google.identity.accesscontextmanager_v1.types import access_level
from google.identity.accesscontextmanager_v1.types import access_level as gia_access_level
from google.identity.accesscontextmanager_v1.types import access_policy
from google.identity.accesscontextmanager_v1.types import gcp_user_access_binding
from google.identity.accesscontextmanager_v1.types import gcp_user_access_binding as gia_gcp_user_access_binding
from google.identity.accesscontextmanager_v1.types import service_perimeter
from google.identity.accesscontextmanager_v1.types import service_perimeter as gia_service_perimeter
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
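# Illustrative only (hypothetical test name): a marker defined above gates a
# version-specific test like so. Shown as a comment so nothing extra is
# collected by pytest:
#
#     @requires_google_auth_gte_1_25_0
#     def test_new_auth_behavior():
#         ...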
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If the default endpoint is localhost, the default mTLS endpoint will be the
# same. This helper modifies the default endpoint so the client can produce a
# distinct mTLS endpoint for endpoint-testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert AccessContextManagerClient._get_default_mtls_endpoint(None) is None
assert AccessContextManagerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert AccessContextManagerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert AccessContextManagerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert AccessContextManagerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert AccessContextManagerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
AccessContextManagerClient,
AccessContextManagerAsyncClient,
])
def test_access_context_manager_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'accesscontextmanager.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.AccessContextManagerGrpcTransport, "grpc"),
(transports.AccessContextManagerGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_access_context_manager_client_service_account_always_use_jwt(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
AccessContextManagerClient,
AccessContextManagerAsyncClient,
])
def test_access_context_manager_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'accesscontextmanager.googleapis.com:443'
def test_access_context_manager_client_get_transport_class():
transport = AccessContextManagerClient.get_transport_class()
available_transports = [
transports.AccessContextManagerGrpcTransport,
]
assert transport in available_transports
transport = AccessContextManagerClient.get_transport_class("grpc")
assert transport == transports.AccessContextManagerGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(AccessContextManagerClient, transports.AccessContextManagerGrpcTransport, "grpc"),
(AccessContextManagerAsyncClient, transports.AccessContextManagerGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(AccessContextManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AccessContextManagerClient))
@mock.patch.object(AccessContextManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AccessContextManagerAsyncClient))
def test_access_context_manager_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(AccessContextManagerClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(AccessContextManagerClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(AccessContextManagerClient, transports.AccessContextManagerGrpcTransport, "grpc", "true"),
(AccessContextManagerAsyncClient, transports.AccessContextManagerGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(AccessContextManagerClient, transports.AccessContextManagerGrpcTransport, "grpc", "false"),
(AccessContextManagerAsyncClient, transports.AccessContextManagerGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(AccessContextManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AccessContextManagerClient))
@mock.patch.object(AccessContextManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AccessContextManagerAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_access_context_manager_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    # This tests the endpoint autoswitch behavior: the endpoint is switched to
    # the default mTLS endpoint if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"
    # and a client certificate exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(AccessContextManagerClient, transports.AccessContextManagerGrpcTransport, "grpc"),
(AccessContextManagerAsyncClient, transports.AccessContextManagerGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_access_context_manager_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(AccessContextManagerClient, transports.AccessContextManagerGrpcTransport, "grpc"),
(AccessContextManagerAsyncClient, transports.AccessContextManagerGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_access_context_manager_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_access_context_manager_client_client_options_from_dict():
with mock.patch('google.identity.accesscontextmanager_v1.services.access_context_manager.transports.AccessContextManagerGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = AccessContextManagerClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
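# Illustrative sketch (not part of the generated suite): roughly how an
# application would build the client that the tests above exercise. The
# endpoint value is hypothetical, and anonymous credentials are used only to
# keep the sketch self-contained; the function is defined but never called.
def _example_construct_client():
    options = client_options.ClientOptions(api_endpoint='accesscontextmanager.googleapis.com')
    return AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )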
def test_list_access_policies(transport: str = 'grpc', request_type=access_context_manager.ListAccessPoliciesRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_policies),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_context_manager.ListAccessPoliciesResponse(
next_page_token='next_page_token_value',
)
response = client.list_access_policies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListAccessPoliciesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAccessPoliciesPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_access_policies_from_dict():
test_list_access_policies(request_type=dict)
def test_list_access_policies_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_policies),
'__call__') as call:
client.list_access_policies()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListAccessPoliciesRequest()
@pytest.mark.asyncio
async def test_list_access_policies_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.ListAccessPoliciesRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_policies),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListAccessPoliciesResponse(
next_page_token='next_page_token_value',
))
response = await client.list_access_policies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListAccessPoliciesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAccessPoliciesAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_access_policies_async_from_dict():
await test_list_access_policies_async(request_type=dict)
def test_list_access_policies_pager():
client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_policies),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
],
next_page_token='abc',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[],
next_page_token='def',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
],
next_page_token='ghi',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
],
),
RuntimeError,
)
metadata = ()
pager = client.list_access_policies(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, access_policy.AccessPolicy)
for i in results)
def test_list_access_policies_pages():
client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_policies),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
],
next_page_token='abc',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[],
next_page_token='def',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
],
next_page_token='ghi',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
],
),
RuntimeError,
)
pages = list(client.list_access_policies(request={}).pages)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_access_policies_async_pager():
client = AccessContextManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_policies),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
],
next_page_token='abc',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[],
next_page_token='def',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
],
next_page_token='ghi',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
],
),
RuntimeError,
)
async_pager = await client.list_access_policies(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, access_policy.AccessPolicy)
for i in responses)
@pytest.mark.asyncio
async def test_list_access_policies_async_pages():
client = AccessContextManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_policies),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
],
next_page_token='abc',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[],
next_page_token='def',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
],
next_page_token='ghi',
),
access_context_manager.ListAccessPoliciesResponse(
access_policies=[
access_policy.AccessPolicy(),
access_policy.AccessPolicy(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_access_policies(request={})).pages:
pages.append(page_)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
            assert page_.raw_page.next_page_token == token
def test_get_access_policy(transport: str = 'grpc', request_type=access_context_manager.GetAccessPolicyRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_policy.AccessPolicy(
name='name_value',
parent='parent_value',
title='title_value',
etag='etag_value',
)
response = client.get_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetAccessPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, access_policy.AccessPolicy)
assert response.name == 'name_value'
assert response.parent == 'parent_value'
assert response.title == 'title_value'
assert response.etag == 'etag_value'
def test_get_access_policy_from_dict():
test_get_access_policy(request_type=dict)
def test_get_access_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_policy),
'__call__') as call:
client.get_access_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetAccessPolicyRequest()
@pytest.mark.asyncio
async def test_get_access_policy_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.GetAccessPolicyRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_policy.AccessPolicy(
name='name_value',
parent='parent_value',
title='title_value',
etag='etag_value',
))
response = await client.get_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetAccessPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, access_policy.AccessPolicy)
assert response.name == 'name_value'
assert response.parent == 'parent_value'
assert response.title == 'title_value'
assert response.etag == 'etag_value'
@pytest.mark.asyncio
async def test_get_access_policy_async_from_dict():
await test_get_access_policy_async(request_type=dict)
def test_get_access_policy_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.GetAccessPolicyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_policy),
'__call__') as call:
call.return_value = access_policy.AccessPolicy()
client.get_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_access_policy_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.GetAccessPolicyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_policy),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_policy.AccessPolicy())
await client.get_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_access_policy_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_policy.AccessPolicy()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_access_policy(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_access_policy_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_access_policy(
access_context_manager.GetAccessPolicyRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_access_policy_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_policy.AccessPolicy())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_access_policy(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_access_policy_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_access_policy(
access_context_manager.GetAccessPolicyRequest(),
name='name_value',
)
def test_create_access_policy(transport: str = 'grpc', request_type=access_policy.AccessPolicy):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.create_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_policy.AccessPolicy()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_access_policy_from_dict():
test_create_access_policy(request_type=dict)
def test_create_access_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_policy),
'__call__') as call:
client.create_access_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_policy.AccessPolicy()
@pytest.mark.asyncio
async def test_create_access_policy_async(transport: str = 'grpc_asyncio', request_type=access_policy.AccessPolicy):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.create_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_policy.AccessPolicy()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_access_policy_async_from_dict():
await test_create_access_policy_async(request_type=dict)
def test_update_access_policy(transport: str = 'grpc', request_type=access_context_manager.UpdateAccessPolicyRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.update_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateAccessPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_access_policy_from_dict():
test_update_access_policy(request_type=dict)
def test_update_access_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_policy),
'__call__') as call:
client.update_access_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateAccessPolicyRequest()
@pytest.mark.asyncio
async def test_update_access_policy_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.UpdateAccessPolicyRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.update_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateAccessPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_access_policy_async_from_dict():
await test_update_access_policy_async(request_type=dict)
def test_update_access_policy_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.UpdateAccessPolicyRequest()
request.policy.name = 'policy.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_policy),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.update_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'policy.name=policy.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_access_policy_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.UpdateAccessPolicyRequest()
request.policy.name = 'policy.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_policy),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.update_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'policy.name=policy.name/value',
) in kw['metadata']
def test_update_access_policy_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_access_policy(
policy=access_policy.AccessPolicy(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].policy == access_policy.AccessPolicy(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_access_policy_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_access_policy(
access_context_manager.UpdateAccessPolicyRequest(),
policy=access_policy.AccessPolicy(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
@pytest.mark.asyncio
async def test_update_access_policy_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_access_policy(
policy=access_policy.AccessPolicy(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].policy == access_policy.AccessPolicy(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_access_policy_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_access_policy(
access_context_manager.UpdateAccessPolicyRequest(),
policy=access_policy.AccessPolicy(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
def test_delete_access_policy(transport: str = 'grpc', request_type=access_context_manager.DeleteAccessPolicyRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.delete_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteAccessPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_access_policy_from_dict():
test_delete_access_policy(request_type=dict)
def test_delete_access_policy_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_policy),
'__call__') as call:
client.delete_access_policy()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteAccessPolicyRequest()
@pytest.mark.asyncio
async def test_delete_access_policy_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.DeleteAccessPolicyRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.delete_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteAccessPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_access_policy_async_from_dict():
await test_delete_access_policy_async(request_type=dict)
def test_delete_access_policy_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.DeleteAccessPolicyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_policy),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.delete_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_access_policy_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.DeleteAccessPolicyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_policy),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.delete_access_policy(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_access_policy_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_access_policy(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_access_policy_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_access_policy(
access_context_manager.DeleteAccessPolicyRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_access_policy_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_policy),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_access_policy(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_access_policy_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_access_policy(
access_context_manager.DeleteAccessPolicyRequest(),
name='name_value',
)
def test_list_access_levels(transport: str = 'grpc', request_type=access_context_manager.ListAccessLevelsRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_context_manager.ListAccessLevelsResponse(
next_page_token='next_page_token_value',
)
response = client.list_access_levels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListAccessLevelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAccessLevelsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_access_levels_from_dict():
test_list_access_levels(request_type=dict)
def test_list_access_levels_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
client.list_access_levels()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListAccessLevelsRequest()
@pytest.mark.asyncio
async def test_list_access_levels_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.ListAccessLevelsRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListAccessLevelsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_access_levels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListAccessLevelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListAccessLevelsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_access_levels_async_from_dict():
await test_list_access_levels_async(request_type=dict)
def test_list_access_levels_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ListAccessLevelsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
call.return_value = access_context_manager.ListAccessLevelsResponse()
client.list_access_levels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_access_levels_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ListAccessLevelsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListAccessLevelsResponse())
await client.list_access_levels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_access_levels_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_context_manager.ListAccessLevelsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_access_levels(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_access_levels_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_access_levels(
access_context_manager.ListAccessLevelsRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_access_levels_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListAccessLevelsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_access_levels(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_access_levels_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_access_levels(
access_context_manager.ListAccessLevelsRequest(),
parent='parent_value',
)
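# Each RPC accepts either a fully-formed request object or flattened keyword
# fields, never both (hence the ValueError tests above). A usage sketch of
# the two equivalent spellings (assumes a configured, non-mocked client):
#
#   request = access_context_manager.ListAccessLevelsRequest(parent='parent_value')
#   client.list_access_levels(request=request)
#   client.list_access_levels(parent='parent_value')  # request built internally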
def test_list_access_levels_pager():
client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
access_level.AccessLevel(),
access_level.AccessLevel(),
],
next_page_token='abc',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[],
next_page_token='def',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
],
next_page_token='ghi',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
access_level.AccessLevel(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
pager = client.list_access_levels(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, access_level.AccessLevel)
for i in results)
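# Iterating the pager yields items across every page; iterating .pages yields
# one raw response per underlying RPC. A usage sketch (the RuntimeError side
# effect above simply marks the end of the canned pages):
#
#   pager = client.list_access_levels(request={})
#   for level in pager:          # flattened: every AccessLevel
#       ...
#   for page in pager.pages:     # one ListAccessLevelsResponse per RPC
#       print(page.next_page_token)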
def test_list_access_levels_pages():
client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
access_level.AccessLevel(),
access_level.AccessLevel(),
],
next_page_token='abc',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[],
next_page_token='def',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
],
next_page_token='ghi',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
access_level.AccessLevel(),
],
),
RuntimeError,
)
pages = list(client.list_access_levels(request={}).pages)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_access_levels_async_pager():
client = AccessContextManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
access_level.AccessLevel(),
access_level.AccessLevel(),
],
next_page_token='abc',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[],
next_page_token='def',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
],
next_page_token='ghi',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
access_level.AccessLevel(),
],
),
RuntimeError,
)
        async_pager = await client.list_access_levels(request={})
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, access_level.AccessLevel)
for i in responses)
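# Patching __call__ with new_callable=mock.AsyncMock makes each side_effect
# entry the result of an awaited coroutine, so the plain response objects
# above need no FakeUnaryUnaryCall wrapper. A minimal sketch:
#
#   with mock.patch.object(type(stub), '__call__',
#                          new_callable=mock.AsyncMock) as call:
#       call.side_effect = (page_one, page_two, RuntimeError)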
@pytest.mark.asyncio
async def test_list_access_levels_async_pages():
client = AccessContextManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_access_levels),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
access_level.AccessLevel(),
access_level.AccessLevel(),
],
next_page_token='abc',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[],
next_page_token='def',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
],
next_page_token='ghi',
),
access_context_manager.ListAccessLevelsResponse(
access_levels=[
access_level.AccessLevel(),
access_level.AccessLevel(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_access_levels(request={})).pages:
pages.append(page_)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_access_level(transport: str = 'grpc', request_type=access_context_manager.GetAccessLevelRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_level.AccessLevel(
name='name_value',
title='title_value',
description='description_value',
basic=access_level.BasicLevel(conditions=[access_level.Condition(ip_subnetworks=['ip_subnetworks_value'])]),
)
response = client.get_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetAccessLevelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, access_level.AccessLevel)
assert response.name == 'name_value'
assert response.title == 'title_value'
assert response.description == 'description_value'
def test_get_access_level_from_dict():
test_get_access_level(request_type=dict)
def test_get_access_level_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_level),
'__call__') as call:
client.get_access_level()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetAccessLevelRequest()
@pytest.mark.asyncio
async def test_get_access_level_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.GetAccessLevelRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_level.AccessLevel(
name='name_value',
title='title_value',
description='description_value',
))
response = await client.get_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetAccessLevelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, access_level.AccessLevel)
assert response.name == 'name_value'
assert response.title == 'title_value'
assert response.description == 'description_value'
@pytest.mark.asyncio
async def test_get_access_level_async_from_dict():
await test_get_access_level_async(request_type=dict)
def test_get_access_level_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.GetAccessLevelRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_level),
'__call__') as call:
call.return_value = access_level.AccessLevel()
client.get_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_access_level_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.GetAccessLevelRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_level),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_level.AccessLevel())
await client.get_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_access_level_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_level.AccessLevel()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_access_level(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_access_level_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_access_level(
access_context_manager.GetAccessLevelRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_access_level_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_level.AccessLevel())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_access_level(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_access_level_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_access_level(
access_context_manager.GetAccessLevelRequest(),
name='name_value',
)
def test_create_access_level(transport: str = 'grpc', request_type=access_context_manager.CreateAccessLevelRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.create_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateAccessLevelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
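# create/update/delete are long-running: the transport returns a raw
# operations_pb2.Operation and the client wraps it in an api_core future.
# A usage sketch (assumes a real client; the timeout is illustrative):
#
#   operation = client.create_access_level(parent='...', access_level=level)
#   result = operation.result(timeout=300)  # blocks until the LRO resolves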
def test_create_access_level_from_dict():
test_create_access_level(request_type=dict)
def test_create_access_level_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_level),
'__call__') as call:
client.create_access_level()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateAccessLevelRequest()
@pytest.mark.asyncio
async def test_create_access_level_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.CreateAccessLevelRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.create_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateAccessLevelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_access_level_async_from_dict():
await test_create_access_level_async(request_type=dict)
def test_create_access_level_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.CreateAccessLevelRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_level),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.create_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_access_level_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.CreateAccessLevelRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_level),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.create_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_access_level_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_access_level(
parent='parent_value',
access_level=gia_access_level.AccessLevel(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].access_level == gia_access_level.AccessLevel(name='name_value')
def test_create_access_level_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_access_level(
access_context_manager.CreateAccessLevelRequest(),
parent='parent_value',
access_level=gia_access_level.AccessLevel(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_access_level_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_access_level(
parent='parent_value',
access_level=gia_access_level.AccessLevel(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].access_level == gia_access_level.AccessLevel(name='name_value')
@pytest.mark.asyncio
async def test_create_access_level_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_access_level(
access_context_manager.CreateAccessLevelRequest(),
parent='parent_value',
access_level=gia_access_level.AccessLevel(name='name_value'),
)
def test_update_access_level(transport: str = 'grpc', request_type=access_context_manager.UpdateAccessLevelRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.update_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateAccessLevelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_access_level_from_dict():
test_update_access_level(request_type=dict)
def test_update_access_level_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_level),
'__call__') as call:
client.update_access_level()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateAccessLevelRequest()
@pytest.mark.asyncio
async def test_update_access_level_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.UpdateAccessLevelRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.update_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateAccessLevelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_access_level_async_from_dict():
await test_update_access_level_async(request_type=dict)
def test_update_access_level_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.UpdateAccessLevelRequest()
request.access_level.name = 'access_level.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_level),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.update_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'access_level.name=access_level.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_access_level_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.UpdateAccessLevelRequest()
request.access_level.name = 'access_level.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_level),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.update_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'access_level.name=access_level.name/value',
) in kw['metadata']
def test_update_access_level_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_access_level(
access_level=gia_access_level.AccessLevel(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].access_level == gia_access_level.AccessLevel(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_access_level_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_access_level(
access_context_manager.UpdateAccessLevelRequest(),
access_level=gia_access_level.AccessLevel(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
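# update_mask names exactly which AccessLevel fields the update overwrites;
# fields not listed are left untouched. A minimal sketch using the
# field_mask_pb2 import already exercised above (field paths illustrative):
#
#   mask = field_mask_pb2.FieldMask(paths=['title', 'description'])
#   client.update_access_level(access_level=level, update_mask=mask)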
@pytest.mark.asyncio
async def test_update_access_level_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_access_level(
access_level=gia_access_level.AccessLevel(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].access_level == gia_access_level.AccessLevel(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_access_level_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_access_level(
access_context_manager.UpdateAccessLevelRequest(),
access_level=gia_access_level.AccessLevel(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
def test_delete_access_level(transport: str = 'grpc', request_type=access_context_manager.DeleteAccessLevelRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.delete_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteAccessLevelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_access_level_from_dict():
test_delete_access_level(request_type=dict)
def test_delete_access_level_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_level),
'__call__') as call:
client.delete_access_level()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteAccessLevelRequest()
@pytest.mark.asyncio
async def test_delete_access_level_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.DeleteAccessLevelRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.delete_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteAccessLevelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_access_level_async_from_dict():
await test_delete_access_level_async(request_type=dict)
def test_delete_access_level_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.DeleteAccessLevelRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_level),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.delete_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_access_level_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.DeleteAccessLevelRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_level),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.delete_access_level(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_access_level_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_access_level(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_access_level_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_access_level(
access_context_manager.DeleteAccessLevelRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_access_level_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_access_level),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_access_level(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_access_level_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_access_level(
access_context_manager.DeleteAccessLevelRequest(),
name='name_value',
)
def test_replace_access_levels(transport: str = 'grpc', request_type=access_context_manager.ReplaceAccessLevelsRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_access_levels),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.replace_access_levels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ReplaceAccessLevelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_replace_access_levels_from_dict():
test_replace_access_levels(request_type=dict)
def test_replace_access_levels_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_access_levels),
'__call__') as call:
client.replace_access_levels()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ReplaceAccessLevelsRequest()
@pytest.mark.asyncio
async def test_replace_access_levels_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.ReplaceAccessLevelsRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_access_levels),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.replace_access_levels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ReplaceAccessLevelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_replace_access_levels_async_from_dict():
await test_replace_access_levels_async(request_type=dict)
def test_replace_access_levels_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ReplaceAccessLevelsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_access_levels),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.replace_access_levels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_replace_access_levels_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ReplaceAccessLevelsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_access_levels),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.replace_access_levels(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_service_perimeters(transport: str = 'grpc', request_type=access_context_manager.ListServicePerimetersRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_context_manager.ListServicePerimetersResponse(
next_page_token='next_page_token_value',
)
response = client.list_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListServicePerimetersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListServicePerimetersPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_service_perimeters_from_dict():
test_list_service_perimeters(request_type=dict)
def test_list_service_perimeters_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
client.list_service_perimeters()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListServicePerimetersRequest()
@pytest.mark.asyncio
async def test_list_service_perimeters_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.ListServicePerimetersRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListServicePerimetersResponse(
next_page_token='next_page_token_value',
))
response = await client.list_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListServicePerimetersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListServicePerimetersAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_service_perimeters_async_from_dict():
await test_list_service_perimeters_async(request_type=dict)
def test_list_service_perimeters_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ListServicePerimetersRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
call.return_value = access_context_manager.ListServicePerimetersResponse()
client.list_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_service_perimeters_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ListServicePerimetersRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListServicePerimetersResponse())
await client.list_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_service_perimeters_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_context_manager.ListServicePerimetersResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_service_perimeters(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_service_perimeters_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_service_perimeters(
access_context_manager.ListServicePerimetersRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_service_perimeters_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListServicePerimetersResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_service_perimeters(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_service_perimeters_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_service_perimeters(
access_context_manager.ListServicePerimetersRequest(),
parent='parent_value',
)
def test_list_service_perimeters_pager():
client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
],
next_page_token='abc',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[],
next_page_token='def',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
],
next_page_token='ghi',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
pager = client.list_service_perimeters(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, service_perimeter.ServicePerimeter)
for i in results)
def test_list_service_perimeters_pages():
client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
],
next_page_token='abc',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[],
next_page_token='def',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
],
next_page_token='ghi',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
],
),
RuntimeError,
)
pages = list(client.list_service_perimeters(request={}).pages)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_service_perimeters_async_pager():
client = AccessContextManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
],
next_page_token='abc',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[],
next_page_token='def',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
],
next_page_token='ghi',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
],
),
RuntimeError,
)
        async_pager = await client.list_service_perimeters(request={})
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, service_perimeter.ServicePerimeter)
for i in responses)
@pytest.mark.asyncio
async def test_list_service_perimeters_async_pages():
client = AccessContextManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_service_perimeters),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
],
next_page_token='abc',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[],
next_page_token='def',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
],
next_page_token='ghi',
),
access_context_manager.ListServicePerimetersResponse(
service_perimeters=[
service_perimeter.ServicePerimeter(),
service_perimeter.ServicePerimeter(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_service_perimeters(request={})).pages:
pages.append(page_)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_service_perimeter(transport: str = 'grpc', request_type=access_context_manager.GetServicePerimeterRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = service_perimeter.ServicePerimeter(
name='name_value',
title='title_value',
description='description_value',
perimeter_type=service_perimeter.ServicePerimeter.PerimeterType.PERIMETER_TYPE_BRIDGE,
use_explicit_dry_run_spec=True,
)
response = client.get_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetServicePerimeterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, service_perimeter.ServicePerimeter)
assert response.name == 'name_value'
assert response.title == 'title_value'
assert response.description == 'description_value'
assert response.perimeter_type == service_perimeter.ServicePerimeter.PerimeterType.PERIMETER_TYPE_BRIDGE
assert response.use_explicit_dry_run_spec is True
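# PerimeterType is a proto-plus IntEnum, so members compare equal to their
# wire values. A minimal sketch (constructing from an int is an assumption
# about the published proto numbering, not something the tests rely on):
#
#   PerimeterType = service_perimeter.ServicePerimeter.PerimeterType
#   bridge = PerimeterType.PERIMETER_TYPE_BRIDGE
#   assert bridge == PerimeterType(int(bridge))  # round-trips via the int value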
def test_get_service_perimeter_from_dict():
test_get_service_perimeter(request_type=dict)
def test_get_service_perimeter_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service_perimeter),
'__call__') as call:
client.get_service_perimeter()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetServicePerimeterRequest()
@pytest.mark.asyncio
async def test_get_service_perimeter_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.GetServicePerimeterRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service_perimeter.ServicePerimeter(
name='name_value',
title='title_value',
description='description_value',
perimeter_type=service_perimeter.ServicePerimeter.PerimeterType.PERIMETER_TYPE_BRIDGE,
use_explicit_dry_run_spec=True,
))
response = await client.get_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetServicePerimeterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, service_perimeter.ServicePerimeter)
assert response.name == 'name_value'
assert response.title == 'title_value'
assert response.description == 'description_value'
assert response.perimeter_type == service_perimeter.ServicePerimeter.PerimeterType.PERIMETER_TYPE_BRIDGE
assert response.use_explicit_dry_run_spec is True
@pytest.mark.asyncio
async def test_get_service_perimeter_async_from_dict():
await test_get_service_perimeter_async(request_type=dict)
def test_get_service_perimeter_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.GetServicePerimeterRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service_perimeter),
'__call__') as call:
call.return_value = service_perimeter.ServicePerimeter()
client.get_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_service_perimeter_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.GetServicePerimeterRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service_perimeter),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service_perimeter.ServicePerimeter())
await client.get_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
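# Illustrative sketch (not part of the generated tests): how the
# ('x-goog-request-params', 'name=name/value') pair asserted in the
# field-header tests is produced. This assumes google.api_core's
# gapic_v1.routing_header.to_grpc_metadata, which URL-encodes the params
# while leaving '/' unescaped.
def test_routing_header_sketch():
    pair = gapic_v1.routing_header.to_grpc_metadata((
        ('name', 'name/value'),
    ))
    assert pair == ('x-goog-request-params', 'name=name/value')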
def test_get_service_perimeter_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = service_perimeter.ServicePerimeter()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_service_perimeter(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_service_perimeter_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_service_perimeter(
access_context_manager.GetServicePerimeterRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_service_perimeter_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(service_perimeter.ServicePerimeter())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_service_perimeter(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_service_perimeter_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_service_perimeter(
access_context_manager.GetServicePerimeterRequest(),
name='name_value',
)
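# Illustrative note: client methods accept either a fully formed request
# object or individual flattened keyword fields, never both; the
# *_flattened_error tests assert that mixing them raises ValueError.
#
#     client.get_service_perimeter(request)             # request object
#     client.get_service_perimeter(name='name_value')   # flattened field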
def test_create_service_perimeter(transport: str = 'grpc', request_type=access_context_manager.CreateServicePerimeterRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.create_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateServicePerimeterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
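# Illustrative note: the mutating service-perimeter RPCs return a
# long-running operation. The client wraps the mocked
# operations_pb2.Operation in a google.api_core future, so a real caller
# would typically block on completion with response.result(); that is not
# exercised here because the transport is mocked.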
def test_create_service_perimeter_from_dict():
test_create_service_perimeter(request_type=dict)
def test_create_service_perimeter_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_service_perimeter),
'__call__') as call:
client.create_service_perimeter()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateServicePerimeterRequest()
@pytest.mark.asyncio
async def test_create_service_perimeter_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.CreateServicePerimeterRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.create_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateServicePerimeterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_service_perimeter_async_from_dict():
await test_create_service_perimeter_async(request_type=dict)
def test_create_service_perimeter_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.CreateServicePerimeterRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_service_perimeter),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.create_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_service_perimeter_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.CreateServicePerimeterRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_service_perimeter),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.create_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_service_perimeter_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_service_perimeter(
parent='parent_value',
service_perimeter=gia_service_perimeter.ServicePerimeter(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].service_perimeter == gia_service_perimeter.ServicePerimeter(name='name_value')
def test_create_service_perimeter_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_service_perimeter(
access_context_manager.CreateServicePerimeterRequest(),
parent='parent_value',
service_perimeter=gia_service_perimeter.ServicePerimeter(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_service_perimeter_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_service_perimeter(
parent='parent_value',
service_perimeter=gia_service_perimeter.ServicePerimeter(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].service_perimeter == gia_service_perimeter.ServicePerimeter(name='name_value')
@pytest.mark.asyncio
async def test_create_service_perimeter_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_service_perimeter(
access_context_manager.CreateServicePerimeterRequest(),
parent='parent_value',
service_perimeter=gia_service_perimeter.ServicePerimeter(name='name_value'),
)
def test_update_service_perimeter(transport: str = 'grpc', request_type=access_context_manager.UpdateServicePerimeterRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.update_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateServicePerimeterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_service_perimeter_from_dict():
test_update_service_perimeter(request_type=dict)
def test_update_service_perimeter_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_service_perimeter),
'__call__') as call:
client.update_service_perimeter()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateServicePerimeterRequest()
@pytest.mark.asyncio
async def test_update_service_perimeter_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.UpdateServicePerimeterRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.update_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateServicePerimeterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_service_perimeter_async_from_dict():
await test_update_service_perimeter_async(request_type=dict)
def test_update_service_perimeter_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.UpdateServicePerimeterRequest()
request.service_perimeter.name = 'service_perimeter.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_service_perimeter),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.update_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'service_perimeter.name=service_perimeter.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_service_perimeter_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.UpdateServicePerimeterRequest()
request.service_perimeter.name = 'service_perimeter.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_service_perimeter),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.update_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'service_perimeter.name=service_perimeter.name/value',
) in kw['metadata']
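# Illustrative note: when the routed value lives on a nested request field,
# the routing param key is the dotted field path, so
# request.service_perimeter.name surfaces as 'service_perimeter.name=...'
# in x-goog-request-params, as asserted above.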
def test_update_service_perimeter_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_service_perimeter(
service_perimeter=gia_service_perimeter.ServicePerimeter(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].service_perimeter == gia_service_perimeter.ServicePerimeter(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_service_perimeter_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_service_perimeter(
access_context_manager.UpdateServicePerimeterRequest(),
service_perimeter=gia_service_perimeter.ServicePerimeter(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
@pytest.mark.asyncio
async def test_update_service_perimeter_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_service_perimeter(
service_perimeter=gia_service_perimeter.ServicePerimeter(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].service_perimeter == gia_service_perimeter.ServicePerimeter(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_service_perimeter_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_service_perimeter(
access_context_manager.UpdateServicePerimeterRequest(),
service_perimeter=gia_service_perimeter.ServicePerimeter(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
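# Illustrative note: update RPCs pair the resource with a FieldMask naming
# the fields to overwrite; 'paths_value' above is a placeholder, whereas a
# real call might pass field_mask_pb2.FieldMask(paths=['title', 'description']).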
def test_delete_service_perimeter(transport: str = 'grpc', request_type=access_context_manager.DeleteServicePerimeterRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.delete_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteServicePerimeterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_service_perimeter_from_dict():
test_delete_service_perimeter(request_type=dict)
def test_delete_service_perimeter_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_service_perimeter),
'__call__') as call:
client.delete_service_perimeter()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteServicePerimeterRequest()
@pytest.mark.asyncio
async def test_delete_service_perimeter_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.DeleteServicePerimeterRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.delete_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteServicePerimeterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_service_perimeter_async_from_dict():
await test_delete_service_perimeter_async(request_type=dict)
def test_delete_service_perimeter_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.DeleteServicePerimeterRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_service_perimeter),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.delete_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_service_perimeter_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.DeleteServicePerimeterRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_service_perimeter),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.delete_service_perimeter(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_service_perimeter_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_service_perimeter(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_service_perimeter_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_service_perimeter(
access_context_manager.DeleteServicePerimeterRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_service_perimeter_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_service_perimeter),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_service_perimeter(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_service_perimeter_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_service_perimeter(
access_context_manager.DeleteServicePerimeterRequest(),
name='name_value',
)
def test_replace_service_perimeters(transport: str = 'grpc', request_type=access_context_manager.ReplaceServicePerimetersRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_service_perimeters),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.replace_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ReplaceServicePerimetersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_replace_service_perimeters_from_dict():
test_replace_service_perimeters(request_type=dict)
def test_replace_service_perimeters_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_service_perimeters),
'__call__') as call:
client.replace_service_perimeters()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ReplaceServicePerimetersRequest()
@pytest.mark.asyncio
async def test_replace_service_perimeters_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.ReplaceServicePerimetersRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_service_perimeters),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.replace_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ReplaceServicePerimetersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_replace_service_perimeters_async_from_dict():
await test_replace_service_perimeters_async(request_type=dict)
def test_replace_service_perimeters_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ReplaceServicePerimetersRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_service_perimeters),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.replace_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_replace_service_perimeters_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ReplaceServicePerimetersRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.replace_service_perimeters),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.replace_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_commit_service_perimeters(transport: str = 'grpc', request_type=access_context_manager.CommitServicePerimetersRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit_service_perimeters),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.commit_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CommitServicePerimetersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_commit_service_perimeters_from_dict():
test_commit_service_perimeters(request_type=dict)
def test_commit_service_perimeters_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit_service_perimeters),
'__call__') as call:
client.commit_service_perimeters()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CommitServicePerimetersRequest()
@pytest.mark.asyncio
async def test_commit_service_perimeters_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.CommitServicePerimetersRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit_service_perimeters),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.commit_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CommitServicePerimetersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_commit_service_perimeters_async_from_dict():
await test_commit_service_perimeters_async(request_type=dict)
def test_commit_service_perimeters_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.CommitServicePerimetersRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit_service_perimeters),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.commit_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_commit_service_perimeters_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.CommitServicePerimetersRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.commit_service_perimeters),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.commit_service_perimeters(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_gcp_user_access_bindings(transport: str = 'grpc', request_type=access_context_manager.ListGcpUserAccessBindingsRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_context_manager.ListGcpUserAccessBindingsResponse(
next_page_token='next_page_token_value',
)
response = client.list_gcp_user_access_bindings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListGcpUserAccessBindingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListGcpUserAccessBindingsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_gcp_user_access_bindings_from_dict():
test_list_gcp_user_access_bindings(request_type=dict)
def test_list_gcp_user_access_bindings_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
client.list_gcp_user_access_bindings()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListGcpUserAccessBindingsRequest()
@pytest.mark.asyncio
async def test_list_gcp_user_access_bindings_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.ListGcpUserAccessBindingsRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListGcpUserAccessBindingsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_gcp_user_access_bindings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.ListGcpUserAccessBindingsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListGcpUserAccessBindingsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_gcp_user_access_bindings_async_from_dict():
await test_list_gcp_user_access_bindings_async(request_type=dict)
def test_list_gcp_user_access_bindings_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ListGcpUserAccessBindingsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
call.return_value = access_context_manager.ListGcpUserAccessBindingsResponse()
client.list_gcp_user_access_bindings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_gcp_user_access_bindings_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.ListGcpUserAccessBindingsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListGcpUserAccessBindingsResponse())
await client.list_gcp_user_access_bindings(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_gcp_user_access_bindings_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = access_context_manager.ListGcpUserAccessBindingsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_gcp_user_access_bindings(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_gcp_user_access_bindings_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_gcp_user_access_bindings(
access_context_manager.ListGcpUserAccessBindingsRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_gcp_user_access_bindings_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(access_context_manager.ListGcpUserAccessBindingsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_gcp_user_access_bindings(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_gcp_user_access_bindings_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_gcp_user_access_bindings(
access_context_manager.ListGcpUserAccessBindingsRequest(),
parent='parent_value',
)
def test_list_gcp_user_access_bindings_pager():
client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
],
next_page_token='abc',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[],
next_page_token='def',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
],
next_page_token='ghi',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
pager = client.list_gcp_user_access_bindings(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, gcp_user_access_binding.GcpUserAccessBinding)
for i in results)
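# Illustrative note: the pager._metadata comparison above verifies that the
# routing metadata built for the first request is captured by the pager and
# reused for subsequent page fetches.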
def test_list_gcp_user_access_bindings_pages():
client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
],
next_page_token='abc',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[],
next_page_token='def',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
],
next_page_token='ghi',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
],
),
RuntimeError,
)
pages = list(client.list_gcp_user_access_bindings(request={}).pages)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_gcp_user_access_bindings_async_pager():
client = AccessContextManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
],
next_page_token='abc',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[],
next_page_token='def',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
],
next_page_token='ghi',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
],
),
RuntimeError,
)
        async_pager = await client.list_gcp_user_access_bindings(request={})
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, gcp_user_access_binding.GcpUserAccessBinding)
for i in responses)
@pytest.mark.asyncio
async def test_list_gcp_user_access_bindings_async_pages():
client = AccessContextManagerAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_gcp_user_access_bindings),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
],
next_page_token='abc',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[],
next_page_token='def',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
],
next_page_token='ghi',
),
access_context_manager.ListGcpUserAccessBindingsResponse(
gcp_user_access_bindings=[
gcp_user_access_binding.GcpUserAccessBinding(),
gcp_user_access_binding.GcpUserAccessBinding(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_gcp_user_access_bindings(request={})).pages:
pages.append(page_)
        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_gcp_user_access_binding(transport: str = 'grpc', request_type=access_context_manager.GetGcpUserAccessBindingRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcp_user_access_binding.GcpUserAccessBinding(
name='name_value',
group_key='group_key_value',
access_levels=['access_levels_value'],
)
response = client.get_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetGcpUserAccessBindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcp_user_access_binding.GcpUserAccessBinding)
assert response.name == 'name_value'
assert response.group_key == 'group_key_value'
assert response.access_levels == ['access_levels_value']
def test_get_gcp_user_access_binding_from_dict():
test_get_gcp_user_access_binding(request_type=dict)
def test_get_gcp_user_access_binding_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_gcp_user_access_binding),
'__call__') as call:
client.get_gcp_user_access_binding()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetGcpUserAccessBindingRequest()
@pytest.mark.asyncio
async def test_get_gcp_user_access_binding_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.GetGcpUserAccessBindingRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcp_user_access_binding.GcpUserAccessBinding(
name='name_value',
group_key='group_key_value',
access_levels=['access_levels_value'],
))
response = await client.get_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.GetGcpUserAccessBindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gcp_user_access_binding.GcpUserAccessBinding)
assert response.name == 'name_value'
assert response.group_key == 'group_key_value'
assert response.access_levels == ['access_levels_value']
@pytest.mark.asyncio
async def test_get_gcp_user_access_binding_async_from_dict():
await test_get_gcp_user_access_binding_async(request_type=dict)
def test_get_gcp_user_access_binding_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.GetGcpUserAccessBindingRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_gcp_user_access_binding),
'__call__') as call:
call.return_value = gcp_user_access_binding.GcpUserAccessBinding()
client.get_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
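# The ('x-goog-request-params', 'name=name/value') pair asserted above is the
# routing header that GAPIC clients derive from URI-template fields. A sketch
# of the equivalent construction, assuming the google.api_core routing-header
# helper behaves as in current releases:
#
#     from google.api_core.gapic_v1 import routing_header
#     routing_header.to_grpc_metadata([('name', 'name/value')])
#     # -> ('x-goog-request-params', 'name=name/value')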
@pytest.mark.asyncio
async def test_get_gcp_user_access_binding_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.GetGcpUserAccessBindingRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_gcp_user_access_binding),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcp_user_access_binding.GcpUserAccessBinding())
await client.get_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_gcp_user_access_binding_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = gcp_user_access_binding.GcpUserAccessBinding()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_gcp_user_access_binding(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_gcp_user_access_binding_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_gcp_user_access_binding(
access_context_manager.GetGcpUserAccessBindingRequest(),
name='name_value',
)
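# A short usage sketch of the convention enforced above: callers pass either a
# request object or flattened keyword fields, never both (transport mocked;
# assumes the module-level imports in this file):
def _demo_flattened_vs_request_object():
    client = AccessContextManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
            type(client.transport.get_gcp_user_access_binding),
            '__call__') as call:
        call.return_value = gcp_user_access_binding.GcpUserAccessBinding()
        # Either a fully-built request object...
        client.get_gcp_user_access_binding(
            access_context_manager.GetGcpUserAccessBindingRequest(name='n'))
        # ...or flattened keyword fields, but not both at once.
        client.get_gcp_user_access_binding(name='n')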
@pytest.mark.asyncio
async def test_get_gcp_user_access_binding_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gcp_user_access_binding.GcpUserAccessBinding())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_gcp_user_access_binding(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_gcp_user_access_binding_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_gcp_user_access_binding(
access_context_manager.GetGcpUserAccessBindingRequest(),
name='name_value',
)
def test_create_gcp_user_access_binding(transport: str = 'grpc', request_type=access_context_manager.CreateGcpUserAccessBindingRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.create_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateGcpUserAccessBindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
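# The create/update/delete RPCs in this section return long-running
# operations. Outside of tests (where the transport is mocked), the returned
# future is consumed as in this sketch, assuming google.api_core's operation
# future interface:
#
#     operation = client.create_gcp_user_access_binding(request=request)
#     binding = operation.result(timeout=300)  # blocks until the LRO finishes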
def test_create_gcp_user_access_binding_from_dict():
test_create_gcp_user_access_binding(request_type=dict)
def test_create_gcp_user_access_binding_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_gcp_user_access_binding),
'__call__') as call:
client.create_gcp_user_access_binding()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateGcpUserAccessBindingRequest()
@pytest.mark.asyncio
async def test_create_gcp_user_access_binding_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.CreateGcpUserAccessBindingRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.create_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.CreateGcpUserAccessBindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_gcp_user_access_binding_async_from_dict():
await test_create_gcp_user_access_binding_async(request_type=dict)
def test_create_gcp_user_access_binding_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.CreateGcpUserAccessBindingRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_gcp_user_access_binding),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.create_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_gcp_user_access_binding_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.CreateGcpUserAccessBindingRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_gcp_user_access_binding),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.create_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_gcp_user_access_binding_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_gcp_user_access_binding(
parent='parent_value',
gcp_user_access_binding=gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].gcp_user_access_binding == gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value')
def test_create_gcp_user_access_binding_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_gcp_user_access_binding(
access_context_manager.CreateGcpUserAccessBindingRequest(),
parent='parent_value',
gcp_user_access_binding=gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_gcp_user_access_binding_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_gcp_user_access_binding(
parent='parent_value',
gcp_user_access_binding=gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].gcp_user_access_binding == gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value')
@pytest.mark.asyncio
async def test_create_gcp_user_access_binding_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_gcp_user_access_binding(
access_context_manager.CreateGcpUserAccessBindingRequest(),
parent='parent_value',
gcp_user_access_binding=gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value'),
)
def test_update_gcp_user_access_binding(transport: str = 'grpc', request_type=access_context_manager.UpdateGcpUserAccessBindingRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.update_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateGcpUserAccessBindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_gcp_user_access_binding_from_dict():
test_update_gcp_user_access_binding(request_type=dict)
def test_update_gcp_user_access_binding_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_gcp_user_access_binding),
'__call__') as call:
client.update_gcp_user_access_binding()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateGcpUserAccessBindingRequest()
@pytest.mark.asyncio
async def test_update_gcp_user_access_binding_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.UpdateGcpUserAccessBindingRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.update_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.UpdateGcpUserAccessBindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_gcp_user_access_binding_async_from_dict():
await test_update_gcp_user_access_binding_async(request_type=dict)
def test_update_gcp_user_access_binding_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.UpdateGcpUserAccessBindingRequest()
request.gcp_user_access_binding.name = 'gcp_user_access_binding.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_gcp_user_access_binding),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.update_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'gcp_user_access_binding.name=gcp_user_access_binding.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_gcp_user_access_binding_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.UpdateGcpUserAccessBindingRequest()
request.gcp_user_access_binding.name = 'gcp_user_access_binding.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_gcp_user_access_binding),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.update_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'gcp_user_access_binding.name=gcp_user_access_binding.name/value',
) in kw['metadata']
def test_update_gcp_user_access_binding_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_gcp_user_access_binding(
gcp_user_access_binding=gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].gcp_user_access_binding == gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_gcp_user_access_binding_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_gcp_user_access_binding(
access_context_manager.UpdateGcpUserAccessBindingRequest(),
gcp_user_access_binding=gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
@pytest.mark.asyncio
async def test_update_gcp_user_access_binding_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_gcp_user_access_binding(
gcp_user_access_binding=gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].gcp_user_access_binding == gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_gcp_user_access_binding_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_gcp_user_access_binding(
access_context_manager.UpdateGcpUserAccessBindingRequest(),
gcp_user_access_binding=gia_gcp_user_access_binding.GcpUserAccessBinding(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
def test_delete_gcp_user_access_binding(transport: str = 'grpc', request_type=access_context_manager.DeleteGcpUserAccessBindingRequest):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.delete_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteGcpUserAccessBindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_gcp_user_access_binding_from_dict():
test_delete_gcp_user_access_binding(request_type=dict)
def test_delete_gcp_user_access_binding_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_gcp_user_access_binding),
'__call__') as call:
client.delete_gcp_user_access_binding()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteGcpUserAccessBindingRequest()
@pytest.mark.asyncio
async def test_delete_gcp_user_access_binding_async(transport: str = 'grpc_asyncio', request_type=access_context_manager.DeleteGcpUserAccessBindingRequest):
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.delete_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == access_context_manager.DeleteGcpUserAccessBindingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_gcp_user_access_binding_async_from_dict():
await test_delete_gcp_user_access_binding_async(request_type=dict)
def test_delete_gcp_user_access_binding_field_headers():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.DeleteGcpUserAccessBindingRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_gcp_user_access_binding),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.delete_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_gcp_user_access_binding_field_headers_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = access_context_manager.DeleteGcpUserAccessBindingRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_gcp_user_access_binding),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.delete_gcp_user_access_binding(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_gcp_user_access_binding_flattened():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_gcp_user_access_binding(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_gcp_user_access_binding_flattened_error():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_gcp_user_access_binding(
access_context_manager.DeleteGcpUserAccessBindingRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_gcp_user_access_binding_flattened_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_gcp_user_access_binding),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_gcp_user_access_binding(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_gcp_user_access_binding_flattened_error_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_gcp_user_access_binding(
access_context_manager.DeleteGcpUserAccessBindingRequest(),
name='name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.AccessContextManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.AccessContextManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AccessContextManagerClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.AccessContextManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = AccessContextManagerClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.AccessContextManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = AccessContextManagerClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.AccessContextManagerGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.AccessContextManagerGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.AccessContextManagerGrpcTransport,
transports.AccessContextManagerGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
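# ``google.auth.default()`` is the application-default-credentials entry point
# mocked above; un-mocked it returns a (credentials, project_id) tuple, e.g.:
#
#     import google.auth
#     credentials, project_id = google.auth.default()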
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.AccessContextManagerGrpcTransport,
)
def test_access_context_manager_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.AccessContextManagerTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_access_context_manager_base_transport():
# Instantiate the base transport.
with mock.patch('google.identity.accesscontextmanager_v1.services.access_context_manager.transports.AccessContextManagerTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.AccessContextManagerTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'list_access_policies',
'get_access_policy',
'create_access_policy',
'update_access_policy',
'delete_access_policy',
'list_access_levels',
'get_access_level',
'create_access_level',
'update_access_level',
'delete_access_level',
'replace_access_levels',
'list_service_perimeters',
'get_service_perimeter',
'create_service_perimeter',
'update_service_perimeter',
'delete_service_perimeter',
'replace_service_perimeters',
'commit_service_perimeters',
'list_gcp_user_access_bindings',
'get_gcp_user_access_binding',
'create_gcp_user_access_binding',
'update_gcp_user_access_binding',
'delete_gcp_user_access_binding',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
@requires_google_auth_gte_1_25_0
def test_access_context_manager_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.identity.accesscontextmanager_v1.services.access_context_manager.transports.AccessContextManagerTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AccessContextManagerTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_access_context_manager_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.identity.accesscontextmanager_v1.services.access_context_manager.transports.AccessContextManagerTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AccessContextManagerTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
def test_access_context_manager_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.identity.accesscontextmanager_v1.services.access_context_manager.transports.AccessContextManagerTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.AccessContextManagerTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_access_context_manager_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
AccessContextManagerClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_access_context_manager_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
AccessContextManagerClient()
adc.assert_called_once_with(
            scopes=('https://www.googleapis.com/auth/cloud-platform',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.AccessContextManagerGrpcTransport,
transports.AccessContextManagerGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_access_context_manager_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.AccessContextManagerGrpcTransport,
transports.AccessContextManagerGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_access_context_manager_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.AccessContextManagerGrpcTransport, grpc_helpers),
(transports.AccessContextManagerGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_access_context_manager_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"accesscontextmanager.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=["1", "2"],
default_host="accesscontextmanager.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.AccessContextManagerGrpcTransport, transports.AccessContextManagerGrpcAsyncIOTransport])
def test_access_context_manager_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
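# ``client_cert_source_for_mtls`` expects a zero-argument callable returning a
# (cert_bytes, key_bytes) pair, as ``client_cert_source_callback`` above does.
# A hypothetical callback reading PEM material from disk might look like this
# sketch (paths are illustrative only):
def _demo_client_cert_source():
    with open('client.crt', 'rb') as f:  # hypothetical path
        cert = f.read()
    with open('client.key', 'rb') as f:  # hypothetical path
        key = f.read()
    return cert, key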
def test_access_context_manager_host_no_port():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='accesscontextmanager.googleapis.com'),
)
assert client.transport._host == 'accesscontextmanager.googleapis.com:443'
def test_access_context_manager_host_with_port():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='accesscontextmanager.googleapis.com:8000'),
)
assert client.transport._host == 'accesscontextmanager.googleapis.com:8000'
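# As the two tests above show, ``ClientOptions.api_endpoint`` is how callers
# point the client at a non-default host; ':443' is appended only when no port
# is given. A sketch for a local test endpoint (hypothetical address):
#
#     options = client_options.ClientOptions(api_endpoint='localhost:8000')
#     client = AccessContextManagerClient(
#         credentials=ga_credentials.AnonymousCredentials(),
#         client_options=options,
#     )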
def test_access_context_manager_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.AccessContextManagerGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_access_context_manager_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.AccessContextManagerGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.AccessContextManagerGrpcTransport, transports.AccessContextManagerGrpcAsyncIOTransport])
def test_access_context_manager_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.AccessContextManagerGrpcTransport, transports.AccessContextManagerGrpcAsyncIOTransport])
def test_access_context_manager_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_access_context_manager_grpc_lro_client():
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
transport = client.transport
    # Ensure that we have an api_core operations client.
assert isinstance(
transport.operations_client,
operations_v1.OperationsClient,
)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_access_context_manager_grpc_lro_async_client():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc_asyncio',
)
transport = client.transport
    # Ensure that we have an api_core operations client.
assert isinstance(
transport.operations_client,
operations_v1.OperationsAsyncClient,
)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_access_level_path():
access_policy = "squid"
access_level = "clam"
expected = "accessPolicies/{access_policy}/accessLevels/{access_level}".format(access_policy=access_policy, access_level=access_level, )
actual = AccessContextManagerClient.access_level_path(access_policy, access_level)
assert expected == actual
def test_parse_access_level_path():
expected = {
"access_policy": "whelk",
"access_level": "octopus",
}
path = AccessContextManagerClient.access_level_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_access_level_path(path)
assert expected == actual
def test_access_policy_path():
access_policy = "oyster"
expected = "accessPolicies/{access_policy}".format(access_policy=access_policy, )
actual = AccessContextManagerClient.access_policy_path(access_policy)
assert expected == actual
def test_parse_access_policy_path():
expected = {
"access_policy": "nudibranch",
}
path = AccessContextManagerClient.access_policy_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_access_policy_path(path)
assert expected == actual
def test_gcp_user_access_binding_path():
organization = "cuttlefish"
gcp_user_access_binding = "mussel"
expected = "organizations/{organization}/gcpUserAccessBindings/{gcp_user_access_binding}".format(organization=organization, gcp_user_access_binding=gcp_user_access_binding, )
actual = AccessContextManagerClient.gcp_user_access_binding_path(organization, gcp_user_access_binding)
assert expected == actual
def test_parse_gcp_user_access_binding_path():
expected = {
"organization": "winkle",
"gcp_user_access_binding": "nautilus",
}
path = AccessContextManagerClient.gcp_user_access_binding_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_gcp_user_access_binding_path(path)
assert expected == actual
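# The resource-path helpers verified above are plain classmethods and need no
# client instance; a small usage sketch (hypothetical IDs):
def _demo_resource_paths():
    name = AccessContextManagerClient.gcp_user_access_binding_path(
        'my-org', 'my-binding')
    assert name == 'organizations/my-org/gcpUserAccessBindings/my-binding'
    assert AccessContextManagerClient.parse_gcp_user_access_binding_path(name) == {
        'organization': 'my-org',
        'gcp_user_access_binding': 'my-binding',
    }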
def test_service_perimeter_path():
access_policy = "scallop"
service_perimeter = "abalone"
expected = "accessPolicies/{access_policy}/servicePerimeters/{service_perimeter}".format(access_policy=access_policy, service_perimeter=service_perimeter, )
actual = AccessContextManagerClient.service_perimeter_path(access_policy, service_perimeter)
assert expected == actual
def test_parse_service_perimeter_path():
expected = {
"access_policy": "squid",
"service_perimeter": "clam",
}
path = AccessContextManagerClient.service_perimeter_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_service_perimeter_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = AccessContextManagerClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = AccessContextManagerClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder, )
actual = AccessContextManagerClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = AccessContextManagerClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization, )
actual = AccessContextManagerClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = AccessContextManagerClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project, )
actual = AccessContextManagerClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = AccessContextManagerClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = AccessContextManagerClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = AccessContextManagerClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = AccessContextManagerClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.AccessContextManagerTransport, '_prep_wrapped_messages') as prep:
client = AccessContextManagerClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.AccessContextManagerTransport, '_prep_wrapped_messages') as prep:
transport_class = AccessContextManagerClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = AccessContextManagerAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
    # Map each transport name to the attribute holding its channel; the local
    # name deliberately does not shadow the module-level ``transports`` import.
    transport_channels = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in transport_channels.items():
        client = AccessContextManagerClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name
        )
        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    transport_names = [
        'grpc',
    ]
    for transport_name in transport_names:
        client = AccessContextManagerClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name
        )
        # Test that the client calls the underlying transport on exit.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
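# Outside of tests, the same context-manager protocol releases the underlying
# channel deterministically; a sketch (``creds`` stands in for real
# credentials):
#
#     with AccessContextManagerClient(credentials=creds) as client:
#         client.get_access_policy(name='accessPolicies/123')
#     # the transport channel is closed on exit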
wsgi_basic/token/__init__.py (repo QthCN/wsgi-basic @ e080304aeaa9922fc9367dbb5cb57a7ab9494b38; 158 bytes; Python; Apache-2.0; hexsha bb93cc81e6f633bac0bc459b82ddc669c9e84426)
from wsgi_basic.token import controllers
from wsgi_basic.token import providers
from wsgi_basic.token import routers
from wsgi_basic.token import persistence
light_cnns/Inception/__init__.py (repo murufeng/awesome_lightweight_networks @ dfa19bd7ee491a7b7ade360175244c81b3c0e322; 197 bytes; Python; MIT; 318 stars; hexsha bbcceef09ee53587969805ff7e8c32280783a493)
from .inception_v1 import *
from .inception_v2 import *
from .inception_v3 import *
from .inception_v4 import *
from .Xception import *
from .pattern_zoo import *
from .inception_nas import *
puq/cnote.py (repo zoidy/puq @ ed70f113f7c135ee61adeebfc9bd18c541970caf; 10,853 bytes; Python; MIT; hexsha bbf008567dc986f686e84b670e534441af36362f)
"""A custom Ttk Notebook with a colored square in the tab
"""
import Tkinter as tkinter
import ttk
red_square = "R0lGODdhEAAQAIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADMAAGYAAJkAAMwAAP8AAAAzADMzAGYzAJkzAMwzAP8zAABmADNmAGZmAJlmAMxmAP9mAACZADOZAGaZAJmZAMyZAP+ZAADMADPMAGbMAJnMAMzMAP/MAAD/ADP/AGb/AJn/AMz/AP//AAAAMzMAM2YAM5kAM8wAM/8AMwAzMzMzM2YzM5kzM8wzM/8zMwBmMzNmM2ZmM5lmM8xmM/9mMwCZMzOZM2aZM5mZM8yZM/+ZMwDMMzPMM2bMM5nMM8zMM//MMwD/MzP/M2b/M5n/M8z/M///MwAAZjMAZmYAZpkAZswAZv8AZgAzZjMzZmYzZpkzZswzZv8zZgBmZjNmZmZmZplmZsxmZv9mZgCZZjOZZmaZZpmZZsyZZv+ZZgDMZjPMZmbMZpnMZszMZv/MZgD/ZjP/Zmb/Zpn/Zsz/Zv//ZgAAmTMAmWYAmZkAmcwAmf8AmQAzmTMzmWYzmZkzmcwzmf8zmQBmmTNmmWZmmZlmmcxmmf9mmQCZmTOZmWaZmZmZmcyZmf+ZmQDMmTPMmWbMmZnMmczMmf/MmQD/mTP/mWb/mZn/mcz/mf//mQAAzDMAzGYAzJkAzMwAzP8AzAAzzDMzzGYzzJkzzMwzzP8zzABmzDNmzGZmzJlmzMxmzP9mzACZzDOZzGaZzJmZzMyZzP+ZzADMzDPMzGbMzJnMzMzMzP/MzAD/zDP/zGb/zJn/zMz/zP//zAAA/zMA/2YA/5kA/8wA//8A/wAz/zMz/2Yz/5kz/8wz//8z/wBm/zNm/2Zm/5lm/8xm//9m/wCZ/zOZ/2aZ/5mZ/8yZ//+Z/wDM/zPM/2bM/5nM/8zM///M/wD//zP//2b//5n//8z//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAEAAQAEAIHQAfCBxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFgQEBADs="
blue_square = "R0lGODdhEAAQAIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADMAAGYAAJkAAMwAAP8AAAAzADMzAGYzAJkzAMwzAP8zAABmADNmAGZmAJlmAMxmAP9mAACZADOZAGaZAJmZAMyZAP+ZAADMADPMAGbMAJnMAMzMAP/MAAD/ADP/AGb/AJn/AMz/AP//AAAAMzMAM2YAM5kAM8wAM/8AMwAzMzMzM2YzM5kzM8wzM/8zMwBmMzNmM2ZmM5lmM8xmM/9mMwCZMzOZM2aZM5mZM8yZM/+ZMwDMMzPMM2bMM5nMM8zMM//MMwD/MzP/M2b/M5n/M8z/M///MwAAZjMAZmYAZpkAZswAZv8AZgAzZjMzZmYzZpkzZswzZv8zZgBmZjNmZmZmZplmZsxmZv9mZgCZZjOZZmaZZpmZZsyZZv+ZZgDMZjPMZmbMZpnMZszMZv/MZgD/ZjP/Zmb/Zpn/Zsz/Zv//ZgAAmTMAmWYAmZkAmcwAmf8AmQAzmTMzmWYzmZkzmcwzmf8zmQBmmTNmmWZmmZlmmcxmmf9mmQCZmTOZmWaZmZmZmcyZmf+ZmQDMmTPMmWbMmZnMmczMmf/MmQD/mTP/mWb/mZn/mcz/mf//mQAAzDMAzGYAzJkAzMwAzP8AzAAzzDMzzGYzzJkzzMwzzP8zzABmzDNmzGZmzJlmzMxmzP9mzACZzDOZzGaZzJmZzMyZzP+ZzADMzDPMzGbMzJnMzMzMzP/MzAD/zDP/zGb/zJn/zMz/zP//zAAA/zMA/2YA/5kA/8wA//8A/wAz/zMz/2Yz/5kz/8wz//8z/wBm/zNm/2Zm/5lm/8xm//9m/wCZ/zOZ/2aZ/5mZ/8yZ//+Z/wDM/zPM/2bM/5nM/8zM///M/wD//zP//2b//5n//8z//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAEAAQAEAIHQB9CRxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFgQEBADs="
green_square = "R0lGODdhEAAQAIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADMAAGYAAJkAAMwAAP8AAAAzADMzAGYzAJkzAMwzAP8zAABmADNmAGZmAJlmAMxmAP9mAACZADOZAGaZAJmZAMyZAP+ZAADMADPMAGbMAJnMAMzMAP/MAAD/ADP/AGb/AJn/AMz/AP//AAAAMzMAM2YAM5kAM8wAM/8AMwAzMzMzM2YzM5kzM8wzM/8zMwBmMzNmM2ZmM5lmM8xmM/9mMwCZMzOZM2aZM5mZM8yZM/+ZMwDMMzPMM2bMM5nMM8zMM//MMwD/MzP/M2b/M5n/M8z/M///MwAAZjMAZmYAZpkAZswAZv8AZgAzZjMzZmYzZpkzZswzZv8zZgBmZjNmZmZmZplmZsxmZv9mZgCZZjOZZmaZZpmZZsyZZv+ZZgDMZjPMZmbMZpnMZszMZv/MZgD/ZjP/Zmb/Zpn/Zsz/Zv//ZgAAmTMAmWYAmZkAmcwAmf8AmQAzmTMzmWYzmZkzmcwzmf8zmQBmmTNmmWZmmZlmmcxmmf9mmQCZmTOZmWaZmZmZmcyZmf+ZmQDMmTPMmWbMmZnMmczMmf/MmQD/mTP/mWb/mZn/mcz/mf//mQAAzDMAzGYAzJkAzMwAzP8AzAAzzDMzzGYzzJkzzMwzzP8zzABmzDNmzGZmzJlmzMxmzP9mzACZzDOZzGaZzJmZzMyZzP+ZzADMzDPMzGbMzJnMzMzMzP/MzAD/zDP/zGb/zJn/zMz/zP//zAAA/zMA/2YA/5kA/8wA//8A/wAz/zMz/2Yz/5kz/8wz//8z/wBm/zNm/2Zm/5lm/8xm//9m/wCZ/zOZ/2aZ/5mZ/8yZ//+Z/wDM/zPM/2bM/5nM/8zM///M/wD//zP//2b//5n//8z//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAEAAQAEAIHQA5CBxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFgQEBADs="
cyan_square = "R0lGODdhEAAQAIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADMAAGYAAJkAAMwAAP8AAAAzADMzAGYzAJkzAMwzAP8zAABmADNmAGZmAJlmAMxmAP9mAACZADOZAGaZAJmZAMyZAP+ZAADMADPMAGbMAJnMAMzMAP/MAAD/ADP/AGb/AJn/AMz/AP//AAAAMzMAM2YAM5kAM8wAM/8AMwAzMzMzM2YzM5kzM8wzM/8zMwBmMzNmM2ZmM5lmM8xmM/9mMwCZMzOZM2aZM5mZM8yZM/+ZMwDMMzPMM2bMM5nMM8zMM//MMwD/MzP/M2b/M5n/M8z/M///MwAAZjMAZmYAZpkAZswAZv8AZgAzZjMzZmYzZpkzZswzZv8zZgBmZjNmZmZmZplmZsxmZv9mZgCZZjOZZmaZZpmZZsyZZv+ZZgDMZjPMZmbMZpnMZszMZv/MZgD/ZjP/Zmb/Zpn/Zsz/Zv//ZgAAmTMAmWYAmZkAmcwAmf8AmQAzmTMzmWYzmZkzmcwzmf8zmQBmmTNmmWZmmZlmmcxmmf9mmQCZmTOZmWaZmZmZmcyZmf+ZmQDMmTPMmWbMmZnMmczMmf/MmQD/mTP/mWb/mZn/mcz/mf//mQAAzDMAzGYAzJkAzMwAzP8AzAAzzDMzzGYzzJkzzMwzzP8zzABmzDNmzGZmzJlmzMxmzP9mzACZzDOZzGaZzJmZzMyZzP+ZzADMzDPMzGbMzJnMzMzMzP/MzAD/zDP/zGb/zJn/zMz/zP//zAAA/zMA/2YA/5kA/8wA//8A/wAz/zMz/2Yz/5kz/8wz//8z/wBm/zNm/2Zm/5lm/8xm//9m/wCZ/zOZ/2aZ/5mZ/8yZ//+Z/wDM/zPM/2bM/5nM/8zM///M/wD//zP//2b//5n//8z//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAEAAQAEAIHQC5CRxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFgQEBADs="
magenta_square = "R0lGODdhEAAQAIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADMAAGYAAJkAAMwAAP8AAAAzADMzAGYzAJkzAMwzAP8zAABmADNmAGZmAJlmAMxmAP9mAACZADOZAGaZAJmZAMyZAP+ZAADMADPMAGbMAJnMAMzMAP/MAAD/ADP/AGb/AJn/AMz/AP//AAAAMzMAM2YAM5kAM8wAM/8AMwAzMzMzM2YzM5kzM8wzM/8zMwBmMzNmM2ZmM5lmM8xmM/9mMwCZMzOZM2aZM5mZM8yZM/+ZMwDMMzPMM2bMM5nMM8zMM//MMwD/MzP/M2b/M5n/M8z/M///MwAAZjMAZmYAZpkAZswAZv8AZgAzZjMzZmYzZpkzZswzZv8zZgBmZjNmZmZmZplmZsxmZv9mZgCZZjOZZmaZZpmZZsyZZv+ZZgDMZjPMZmbMZpnMZszMZv/MZgD/ZjP/Zmb/Zpn/Zsz/Zv//ZgAAmTMAmWYAmZkAmcwAmf8AmQAzmTMzmWYzmZkzmcwzmf8zmQBmmTNmmWZmmZlmmcxmmf9mmQCZmTOZmWaZmZmZmcyZmf+ZmQDMmTPMmWbMmZnMmczMmf/MmQD/mTP/mWb/mZn/mcz/mf//mQAAzDMAzGYAzJkAzMwAzP8AzAAzzDMzzGYzzJkzzMwzzP8zzABmzDNmzGZmzJlmzMxmzP9mzACZzDOZzGaZzJmZzMyZzP+ZzADMzDPMzGbMzJnMzMzMzP/MzAD/zDP/zGb/zJn/zMz/zP//zAAA/zMA/2YA/5kA/8wA//8A/wAz/zMz/2Yz/5kz/8wz//8z/wBm/zNm/2Zm/5lm/8xm//9m/wCZ/zOZ/2aZ/5mZ/8yZ//+Z/wDM/zPM/2bM/5nM/8zM///M/wD//zP//2b//5n//8z//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAEAAQAEAIHQCHCRxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFgQEBADs="
yellow_square = "R0lGODdhEAAQAIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADMAAGYAAJkAAMwAAP8AAAAzADMzAGYzAJkzAMwzAP8zAABmADNmAGZmAJlmAMxmAP9mAACZADOZAGaZAJmZAMyZAP+ZAADMADPMAGbMAJnMAMzMAP/MAAD/ADP/AGb/AJn/AMz/AP//AAAAMzMAM2YAM5kAM8wAM/8AMwAzMzMzM2YzM5kzM8wzM/8zMwBmMzNmM2ZmM5lmM8xmM/9mMwCZMzOZM2aZM5mZM8yZM/+ZMwDMMzPMM2bMM5nMM8zMM//MMwD/MzP/M2b/M5n/M8z/M///MwAAZjMAZmYAZpkAZswAZv8AZgAzZjMzZmYzZpkzZswzZv8zZgBmZjNmZmZmZplmZsxmZv9mZgCZZjOZZmaZZpmZZsyZZv+ZZgDMZjPMZmbMZpnMZszMZv/MZgD/ZjP/Zmb/Zpn/Zsz/Zv//ZgAAmTMAmWYAmZkAmcwAmf8AmQAzmTMzmWYzmZkzmcwzmf8zmQBmmTNmmWZmmZlmmcxmmf9mmQCZmTOZmWaZmZmZmcyZmf+ZmQDMmTPMmWbMmZnMmczMmf/MmQD/mTP/mWb/mZn/mcz/mf//mQAAzDMAzGYAzJkAzMwAzP8AzAAzzDMzzGYzzJkzzMwzzP8zzABmzDNmzGZmzJlmzMxmzP9mzACZzDOZzGaZzJmZzMyZzP+ZzADMzDPMzGbMzJnMzMzMzP/MzAD/zDP/zGb/zJn/zMz/zP//zAAA/zMA/2YA/5kA/8wA//8A/wAz/zMz/2Yz/5kz/8wz//8z/wBm/zNm/2Zm/5lm/8xm//9m/wCZ/zOZ/2aZ/5mZ/8yZ//+Z/wDM/zPM/2bM/5nM/8zM///M/wD//zP//2b//5n//8z//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAEAAQAEAIHQBbCBxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFgQEBADs="
orange_square = "R0lGODdhEAAQAIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADMAAGYAAJkAAMwAAP8AAAAzADMzAGYzAJkzAMwzAP8zAABmADNmAGZmAJlmAMxmAP9mAACZADOZAGaZAJmZAMyZAP+ZAADMADPMAGbMAJnMAMzMAP/MAAD/ADP/AGb/AJn/AMz/AP//AAAAMzMAM2YAM5kAM8wAM/8AMwAzMzMzM2YzM5kzM8wzM/8zMwBmMzNmM2ZmM5lmM8xmM/9mMwCZMzOZM2aZM5mZM8yZM/+ZMwDMMzPMM2bMM5nMM8zMM//MMwD/MzP/M2b/M5n/M8z/M///MwAAZjMAZmYAZpkAZswAZv8AZgAzZjMzZmYzZpkzZswzZv8zZgBmZjNmZmZmZplmZsxmZv9mZgCZZjOZZmaZZpmZZsyZZv+ZZgDMZjPMZmbMZpnMZszMZv/MZgD/ZjP/Zmb/Zpn/Zsz/Zv//ZgAAmTMAmWYAmZkAmcwAmf8AmQAzmTMzmWYzmZkzmcwzmf8zmQBmmTNmmWZmmZlmmcxmmf9mmQCZmTOZmWaZmZmZmcyZmf+ZmQDMmTPMmWbMmZnMmczMmf/MmQD/mTP/mWb/mZn/mcz/mf//mQAAzDMAzGYAzJkAzMwAzP8AzAAzzDMzzGYzzJkzzMwzzP8zzABmzDNmzGZmzJlmzMxmzP9mzACZzDOZzGaZzJmZzMyZzP+ZzADMzDPMzGbMzJnMzMzMzP/MzAD/zDP/zGb/zJn/zMz/zP//zAAA/zMA/2YA/5kA/8wA//8A/wAz/zMz/2Yz/5kz/8wz//8z/wBm/zNm/2Zm/5lm/8xm//9m/wCZ/zOZ/2aZ/5mZ/8yZ//+Z/wDM/zPM/2bM/5nM/8zM///M/wD//zP//2b//5n//8z//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAEAAQAEAIHQBDCBxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFgQEBADs="
black_square = "R0lGODdhEAAQAIcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADMAAGYAAJkAAMwAAP8AAAAzADMzAGYzAJkzAMwzAP8zAABmADNmAGZmAJlmAMxmAP9mAACZADOZAGaZAJmZAMyZAP+ZAADMADPMAGbMAJnMAMzMAP/MAAD/ADP/AGb/AJn/AMz/AP//AAAAMzMAM2YAM5kAM8wAM/8AMwAzMzMzM2YzM5kzM8wzM/8zMwBmMzNmM2ZmM5lmM8xmM/9mMwCZMzOZM2aZM5mZM8yZM/+ZMwDMMzPMM2bMM5nMM8zMM//MMwD/MzP/M2b/M5n/M8z/M///MwAAZjMAZmYAZpkAZswAZv8AZgAzZjMzZmYzZpkzZswzZv8zZgBmZjNmZmZmZplmZsxmZv9mZgCZZjOZZmaZZpmZZsyZZv+ZZgDMZjPMZmbMZpnMZszMZv/MZgD/ZjP/Zmb/Zpn/Zsz/Zv//ZgAAmTMAmWYAmZkAmcwAmf8AmQAzmTMzmWYzmZkzmcwzmf8zmQBmmTNmmWZmmZlmmcxmmf9mmQCZmTOZmWaZmZmZmcyZmf+ZmQDMmTPMmWbMmZnMmczMmf/MmQD/mTP/mWb/mZn/mcz/mf//mQAAzDMAzGYAzJkAzMwAzP8AzAAzzDMzzGYzzJkzzMwzzP8zzABmzDNmzGZmzJlmzMxmzP9mzACZzDOZzGaZzJmZzMyZzP+ZzADMzDPMzGbMzJnMzMzMzP/MzAD/zDP/zGb/zJn/zMz/zP//zAAA/zMA/2YA/5kA/8wA//8A/wAz/zMz/2Yz/5kz/8wz//8z/wBm/zNm/2Zm/5lm/8xm//9m/wCZ/zOZ/2aZ/5mZ/8yZ//+Z/wDM/zPM/2bM/5nM/8zM///M/wD//zP//2b//5n//8z//////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAEAAQAEAIHQABCBxIsKDBgwgTKlzIsKHDhxAjSpxIsaLFgQEBADs="
sq_icons = {
    'red': red_square,
    'blue': blue_square,
    'green': green_square,
    'cyan': cyan_square,
    'magenta': magenta_square,
    'yellow': yellow_square,
    'orange': orange_square,
    'black': black_square,
}
sq_colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'orange']
sq_image_cache = {}
def sq_image(color):
    # Return a cached PhotoImage for a color name, or for an index into sq_colors.
    global sq_image_cache, sq_colors, sq_icons
    try:
        return sq_image_cache[color]
    except KeyError:
        if isinstance(color, int):
            color = sq_colors[color]
        if color in sq_colors:
            sq_image_cache[color] = tkinter.PhotoImage(data=sq_icons[color])
            return sq_image_cache[color]
        else:
            raise ValueError("Unknown color %s" % color)
class CNote(ttk.Notebook):
    initialized = False
    def __init__(self, parent, height=0, width=0):
        ttk.Notebook.__init__(self, parent, width=width, height=height)
    def add(self, frame, **kwargs):
        # A 'color' keyword adds the matching square icon to the tab label.
        color = kwargs.get('color')
        if color is not None:
            del kwargs['color']
            kwargs['image'] = sq_image(color)
            kwargs['compound'] = 'left'
        ttk.Notebook.add(self, frame, **kwargs)
if __name__ == "__main__":
    root = tkinter.Tk()
    f = tkinter.Frame(root)
    nb = CNote(f, 200, 600)
    nb2 = CNote(f, 400, 600)
    f1 = tkinter.Frame(nb)
    f2 = tkinter.Frame(nb)
    f3 = tkinter.Frame(nb2)
    f4 = tkinter.Frame(nb2)
    nb.add(f1, color='red', text='Frame1', padding=3)
    nb.add(f2, color='green', text='Frame2', padding=3)
    nb2.add(f3, color=5, text='Frame10', padding=3)
    nb2.add(f4, color='orange', text='Frame11', padding=3)
    nb.pack(expand=1, fill='both')
    nb2.pack(expand=1, fill='both')
    f.pack(expand=1, fill='both')
    root.mainloop()
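The module above is Python 2 only because of its imports; the widget logic itself is version-agnostic. A minimal sketch of the Python 3 equivalent of those imports (an editor's sketch, not part of the original file):
import tkinter
from tkinter import ttk
# tkinter.PhotoImage still accepts base64-encoded GIF data via the data= keyword,
# and ttk.Notebook.add() keeps the same image/compound options, so the rest of
# cnote.py runs unchanged once these two imports are swapped in.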
| 139.141026
| 1,119
| 0.85147
| 905
| 10,853
| 10.154696
| 0.19116
| 0.1358
| 0.155822
| 0.159304
| 0.72938
| 0.718172
| 0.718172
| 0.718172
| 0.718172
| 0.718172
| 0
| 0.046761
| 0.04432
| 10,853
| 77
| 1,120
| 140.948052
| 0.839279
| 0.004976
| 0
| 0.032258
| 0
| 0.129032
| 0.832376
| 0.815419
| 0
| 1
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.032258
| 0
| 0.145161
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
bbf18055f80049cdad3a3e108e76f9dbd88dffd7
| 6,701
|
py
|
Python
|
flask_blueprint_test/flaskapp/mongodbRestapi.py
|
WommyInStandingPosition/YtbDataApiRelated
|
4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d
|
[
"MIT"
] | null | null | null |
flask_blueprint_test/flaskapp/mongodbRestapi.py
|
WommyInStandingPosition/YtbDataApiRelated
|
4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d
|
[
"MIT"
] | null | null | null |
flask_blueprint_test/flaskapp/mongodbRestapi.py
|
WommyInStandingPosition/YtbDataApiRelated
|
4856ad2ee5be49bb74c79c3d6649f9d1fdbdc85d
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, Response, request, json
from .mongodbapi import YtbSearchRecordDBAPI_V0
mongodbrestapi = Blueprint('mongodbrestapi', __name__)
# TODO: modify the route to /ytbrecordapi/v0/{db_url}/{db_col}/readall|read|write|update|delete
def verify_db_access(db_url, db_port, db_name, col_name, access_level=None):
    if not access_level:
        pass
@mongodbrestapi.route('/ytbrecordapi/v0/readall', methods=['GET'])
def ytb_record_db_api_readall_legacy():
    db_obj = YtbSearchRecordDBAPI_V0({})
    return_data = db_obj.read()
    return Response(response=json.dumps(return_data),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/<string:db_url>/<int:db_port>/<string:db_name>/<string:col_name>/readall',
                      methods=['GET'])
def ytb_record_db_api_readall(db_url, db_port, db_name, col_name):
    verify_db_access(db_url, db_port, db_name, col_name)
    db_obj = YtbSearchRecordDBAPI_V0({}, db_url=db_url, db_port=db_port, db_name=db_name, col_name=col_name)
    return_data = db_obj.read()
    return Response(response=json.dumps(return_data),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/read', methods=['POST'])
def ytb_record_db_api_read_legacy():
    data = request.json
    if not data:
        return Response(response=json.dumps({"Error": "Please provide connection information"}),
                        status=400,
                        mimetype='application/json')
    db_obj = YtbSearchRecordDBAPI_V0(data)
    return_data = db_obj.read()
    return Response(response=json.dumps(return_data),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/<string:db_url>/<int:db_port>/<string:db_name>/<string:col_name>/read',
                      methods=['POST'])
def ytb_record_db_api_read(db_url, db_port, db_name, col_name):
    data = request.json
    if not data:
        return Response(response=json.dumps({"Error": "Please provide connection information"}),
                        status=400,
                        mimetype='application/json')
    verify_db_access(db_url, db_port, db_name, col_name)
    db_obj = YtbSearchRecordDBAPI_V0(data, db_url=db_url, db_port=db_port, db_name=db_name, col_name=col_name)
    return_data = db_obj.read()
    return Response(response=json.dumps(return_data),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/write', methods=['POST'])
def ytb_record_db_api_write_legacy():
    data = request.json
    if not data:
        return Response(response=json.dumps({"Error": "Please provide connection information"}),
                        status=400,
                        mimetype='application/json')
    db_obj = YtbSearchRecordDBAPI_V0(data)
    write_status = db_obj.write()
    return Response(response=json.dumps({'write_status': write_status}),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/<string:db_url>/<int:db_port>/<string:db_name>/<string:col_name>/write',
                      methods=['POST'])
def ytb_record_db_api_write(db_url, db_port, db_name, col_name):
    data = request.json
    if not data:
        return Response(response=json.dumps({"Error": "Please provide connection information"}),
                        status=400,
                        mimetype='application/json')
    verify_db_access(db_url, db_port, db_name, col_name)
    db_obj = YtbSearchRecordDBAPI_V0(data, db_url=db_url, db_port=db_port, db_name=db_name, col_name=col_name)
    write_status = db_obj.write()
    return Response(response=json.dumps({'write_status': write_status}),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/update', methods=['PUT'])
def ytb_record_db_api_update_legacy():
    data = request.json
    if not data:
        return Response(response=json.dumps({"Error": "Please provide connection information"}),
                        status=400,
                        mimetype='application/json')
    db_obj = YtbSearchRecordDBAPI_V0(data)
    update_status = db_obj.update()
    return Response(response=json.dumps({'update_status': update_status}),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/<string:db_url>/<int:db_port>/<string:db_name>/<string:col_name>/update',
                      methods=['PUT'])
def ytb_record_db_api_update(db_url, db_port, db_name, col_name):
    data = request.json
    if not data:
        return Response(response=json.dumps({"Error": "Please provide connection information"}),
                        status=400,
                        mimetype='application/json')
    verify_db_access(db_url, db_port, db_name, col_name)
    db_obj = YtbSearchRecordDBAPI_V0(data, db_url=db_url, db_port=db_port, db_name=db_name, col_name=col_name)
    update_status = db_obj.update()
    return Response(response=json.dumps({'update_status': update_status}),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/delete', methods=['DELETE'])
def ytb_record_db_api_delete_legacy():
    data = request.json
    if not data:
        return Response(response=json.dumps({"Error": "Please provide connection information"}),
                        status=400,
                        mimetype='application/json')
    db_obj = YtbSearchRecordDBAPI_V0(data)
    delete_status = db_obj.delete()
    return Response(response=json.dumps({'delete_status': delete_status}),
                    status=200,
                    mimetype='application/json')
@mongodbrestapi.route('/ytbrecordapi/v0/<string:db_url>/<int:db_port>/<string:db_name>/<string:col_name>/delete',
                      methods=['DELETE'])
def ytb_record_db_api_delete(db_url, db_port, db_name, col_name):
    data = request.json
    if not data:
        return Response(response=json.dumps({"Error": "Please provide connection information"}),
                        status=400,
                        mimetype='application/json')
    verify_db_access(db_url, db_port, db_name, col_name)
    db_obj = YtbSearchRecordDBAPI_V0(data, db_url=db_url, db_port=db_port, db_name=db_name, col_name=col_name)
    delete_status = db_obj.delete()
    return Response(response=json.dumps({'delete_status': delete_status}),
                    status=200,
                    mimetype='application/json')
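Every handler above builds its Response by hand from the same three arguments. A small helper would remove that repetition; a sketch (the json_response name is ours, not part of the module):
def json_response(payload, status=200):
    # Serialize the payload and wrap it in the JSON Response used by every route above.
    return Response(response=json.dumps(payload),
                    status=status,
                    mimetype='application/json')
With it, each handler's tail collapses to, e.g., return json_response(return_data), and the guard clause becomes return json_response({"Error": "Please provide connection information"}, status=400).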
| 41.621118
| 114
| 0.650201
| 820
| 6,701
| 5.037805
| 0.071951
| 0.03268
| 0.037279
| 0.11329
| 0.926894
| 0.926894
| 0.926894
| 0.926894
| 0.921084
| 0.826192
| 0
| 0.01476
| 0.231607
| 6,701
| 161
| 115
| 41.621118
| 0.787532
| 0.013879
| 0
| 0.753968
| 0
| 0.039683
| 0.197518
| 0.083396
| 0
| 0
| 0
| 0.006211
| 0
| 1
| 0.087302
| false
| 0.007937
| 0.015873
| 0
| 0.246032
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a53c898049d6477eac1652e13c1ef49f02da7972
| 198
|
py
|
Python
|
lawyerd/complaint/views.py
|
loobinsk/customer_project
|
4f43d4c6db2c99926715ea16451511466569c4ae
|
[
"MIT"
] | null | null | null |
lawyerd/complaint/views.py
|
loobinsk/customer_project
|
4f43d4c6db2c99926715ea16451511466569c4ae
|
[
"MIT"
] | null | null | null |
lawyerd/complaint/views.py
|
loobinsk/customer_project
|
4f43d4c6db2c99926715ea16451511466569c4ae
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render
| 33
| 57
| 0.873737
| 25
| 198
| 6.88
| 0.56
| 0.232558
| 0.197674
| 0.244186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085859
| 198
| 5
| 58
| 39.6
| 0.950276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
a5c017c5e85f3eef83b456d34fb5434aa08098c2
| 48
|
py
|
Python
|
glpy/__init__.py
|
abhinav-TB/glpy
|
00d8769064dd3371c32663479654bb2b124738ae
|
[
"Apache-2.0"
] | 11
|
2021-09-10T20:59:41.000Z
|
2021-11-16T12:39:41.000Z
|
glpy/__init__.py
|
abhinav-TB/glpy
|
00d8769064dd3371c32663479654bb2b124738ae
|
[
"Apache-2.0"
] | 1
|
2021-09-18T21:32:01.000Z
|
2021-10-16T05:43:27.000Z
|
glpy/__init__.py
|
abhinav-TB/glpy
|
00d8769064dd3371c32663479654bb2b124738ae
|
[
"Apache-2.0"
] | null | null | null |
from glpy.main import *
from glpy.utils import *
| 24
| 24
| 0.770833
| 8
| 48
| 4.625
| 0.625
| 0.432432
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 24
| 24
| 0.902439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3c144fa89ce3b272acad5983728738c87a402db3
| 3,960
|
py
|
Python
|
04_Selenium/framework/with_nose_test.py
|
twiindan/selenium_lessons
|
798557e8f584f9e6655414c13f232017483f0439
|
[
"Apache-2.0"
] | null | null | null |
04_Selenium/framework/with_nose_test.py
|
twiindan/selenium_lessons
|
798557e8f584f9e6655414c13f232017483f0439
|
[
"Apache-2.0"
] | null | null | null |
04_Selenium/framework/with_nose_test.py
|
twiindan/selenium_lessons
|
798557e8f584f9e6655414c13f232017483f0439
|
[
"Apache-2.0"
] | 1
|
2020-07-16T09:49:47.000Z
|
2020-07-16T09:49:47.000Z
|
from nose.tools import assert_equals
from selenium import webdriver
import random
class TestCreatePoll():
    baseUrl = "http://twiindan.pythonanywhere.com/admin"
    @classmethod
    def setup_class(self):
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(3)
    def setup(self):
        self.driver.get(self.baseUrl)
    def test_createPoll(self):
        loginTextBox = self.driver.find_element_by_id("id_username")
        passwordTextBox = self.driver.find_element_by_id("id_password")
        logInButton = self.driver.find_element_by_xpath("//input[contains(@value, 'Log in')]")
        loginTextBox.send_keys("user1")
        passwordTextBox.send_keys("selenium")
        logInButton.click()
        questionsLink = self.driver.find_element_by_link_text("Questions")
        assert_equals(questionsLink.get_attribute('href'), 'http://twiindan.pythonanywhere.com/admin/polls/question/')
        addLink = self.driver.find_element_by_class_name("addlink")
        addLink.click()
        questionText = self.driver.find_element_by_id("id_question_text")
        showMore = self.driver.find_element_by_id("fieldsetcollapser0")
        showMore.click()
        today_link = self.driver.find_element_by_link_text("Today")
        today_link.click()
        now_link = self.driver.find_element_by_link_text("Now")
        now_link.click()
        random_number = random.randint(1, 100000)
        questionText.send_keys("Question {}".format(random_number))
        choiceText1 = self.driver.find_element_by_name("choice_set-0-choice_text")
        choiceText2 = self.driver.find_element_by_name("choice_set-1-choice_text")
        choiceText3 = self.driver.find_element_by_name("choice_set-2-choice_text")
        choiceText1.send_keys("Selenium")
        choiceText2.send_keys("Python")
        choiceText3.send_keys("Webpages")
        choiceVotes = self.driver.find_element_by_name("choice_set-0-votes")
        choiceVotes.clear()
        choiceVotes.send_keys("3")
        saveButton = self.driver.find_element_by_name("_save")
        saveButton.click()
    def test_createPollWithOnlyTwoOptions(self):
        loginTextBox = self.driver.find_element_by_id("id_username")
        passwordTextBox = self.driver.find_element_by_id("id_password")
        logInButton = self.driver.find_element_by_xpath("//input[contains(@value, 'Log in')]")
        loginTextBox.send_keys("user1")
        passwordTextBox.send_keys("selenium")
        logInButton.click()
        questionsLink = self.driver.find_element_by_link_text("Questions")
        assert_equals(questionsLink.get_attribute('href'), 'http://twiindan.pythonanywhere.com/admin/polls/question/')
        addLink = self.driver.find_element_by_class_name("addlink")
        addLink.click()
        questionText = self.driver.find_element_by_id("id_question_text")
        showMore = self.driver.find_element_by_id("fieldsetcollapser0")
        showMore.click()
        today_link = self.driver.find_element_by_link_text("Today")
        today_link.click()
        now_link = self.driver.find_element_by_link_text("Now")
        now_link.click()
        random_number = random.randint(1, 100000)
        questionText.send_keys("Question {}".format(random_number))
        choiceText1 = self.driver.find_element_by_name("choice_set-0-choice_text")
        choiceText2 = self.driver.find_element_by_name("choice_set-1-choice_text")
        choiceText1.send_keys("Selenium")
        choiceText2.send_keys("Python")
        choiceVotes = self.driver.find_element_by_name("choice_set-0-votes")
        choiceVotes.clear()
        choiceVotes.send_keys("5")
        saveButton = self.driver.find_element_by_name("_save")
        saveButton.click()
    def teardown(self):
        logout_link = self.driver.find_element_by_link_text("Log out")
        logout_link.click()
    @classmethod
    def teardown_class(self):
        self.driver.quit()
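The find_element_by_* helpers used throughout this test were deprecated and then removed in Selenium 4. A sketch of the equivalent By-locator calls under Selenium 4 (driver stands for the test's self.driver):
from selenium.webdriver.common.by import By
# Same lookups as above, expressed with By locators:
login_text_box = driver.find_element(By.ID, "id_username")
log_in_button = driver.find_element(By.XPATH, "//input[contains(@value, 'Log in')]")
questions_link = driver.find_element(By.LINK_TEXT, "Questions")
choice_text_1 = driver.find_element(By.NAME, "choice_set-0-choice_text")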
| 34.434783
| 118
| 0.69697
| 473
| 3,960
| 5.513742
| 0.186047
| 0.122699
| 0.150307
| 0.22546
| 0.852377
| 0.83934
| 0.83934
| 0.83934
| 0.812117
| 0.812117
| 0
| 0.011842
| 0.189646
| 3,960
| 114
| 119
| 34.736842
| 0.800873
| 0
| 0
| 0.727273
| 0
| 0
| 0.16393
| 0.042435
| 0
| 0
| 0
| 0
| 0.038961
| 1
| 0.077922
| false
| 0.051948
| 0.038961
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
3c6311bcdb7a61738ccdd2c7804f828b98c29b85
| 203
|
py
|
Python
|
pySnowRadar/algorithms/__init__.py
|
kingjml/pySnowRadar
|
a64721c3a84f255aa3bb9b872682a79969f7b1be
|
[
"MIT"
] | 4
|
2020-06-04T00:25:46.000Z
|
2021-12-17T15:08:35.000Z
|
pySnowRadar/algorithms/__init__.py
|
kingjml/pySnowRadar
|
a64721c3a84f255aa3bb9b872682a79969f7b1be
|
[
"MIT"
] | 7
|
2020-02-19T11:34:26.000Z
|
2020-10-02T12:52:17.000Z
|
pySnowRadar/algorithms/__init__.py
|
kingjml/pySnowRadar
|
a64721c3a84f255aa3bb9b872682a79969f7b1be
|
[
"MIT"
] | null | null | null |
from .Wavelet import Wavelet_TN, Wavelet_JK
from .GSFC import GSFC_NK, NSIDC # Not yet integrated
from .Peakiness import Peakiness
def available_pickers():
    return [Wavelet_TN, Wavelet_JK, Peakiness]
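available_pickers gives callers a single discovery point for the integrated pickers; a minimal usage sketch (illustrative only, the pickers' call signatures are defined elsewhere in the package):
from pySnowRadar.algorithms import available_pickers
for picker in available_pickers():
    print(picker.__name__)  # Wavelet_TN, Wavelet_JK, Peakiness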
| 33.833333
| 53
| 0.79803
| 29
| 203
| 5.37931
| 0.551724
| 0.115385
| 0.205128
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 203
| 6
| 54
| 33.833333
| 0.891429
| 0.08867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
b1e5a635128f456550f9ff0d3e83c7e4b09ec80f
| 11,255
|
py
|
Python
|
test/test_messages.py
|
vinv1n/MultiChannel
|
6788cc8f306da8535ee7d182cbf5ed5d9c517669
|
[
"MIT"
] | null | null | null |
test/test_messages.py
|
vinv1n/MultiChannel
|
6788cc8f306da8535ee7d182cbf5ed5d9c517669
|
[
"MIT"
] | null | null | null |
test/test_messages.py
|
vinv1n/MultiChannel
|
6788cc8f306da8535ee7d182cbf5ed5d9c517669
|
[
"MIT"
] | null | null | null |
import unittest
import requests
import argparse
import logging
import json
URL = 'http://127.0.0.1:5000/api'
class get_messages_test(unittest.TestCase):
    def setUp(self):
        print("----------Running get_messages_test----------")
        headers = {'Content-type': 'application/json'}
        data = {"username": "admin", "password": "admin"}
        login_response = requests.post(URL+'/user-login', headers=headers, data=json.dumps(data))
        self.auth_cookies = {'access_token_cookie': login_response.cookies.get('access_token_cookie'),
                             'refresh_token_cookie': login_response.cookies.get('refresh_token_cookie')}
        response = requests.get(URL+'/users', cookies=self.auth_cookies)
        users = response.json().get('users', [])
        self.ids = [user.get('_id') for user in users]
    def test_get_messages(self):
        response = requests.get(URL+'/messages', cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 200)
    def test_get_messages_fail_no_auth(self):
        response = requests.get(URL+'/messages')
        self.assertEqual(response.status_code, 401)
class create_message_test(unittest.TestCase):
    def setUp(self):
        print("----------Running create_messages_test----------")
        headers = {'Content-type': 'application/json'}
        data = {"username": "admin", "password": "admin"}
        login_response = requests.post(URL+'/user-login', headers=headers, data=json.dumps(data))
        self.auth_cookies = {'access_token_cookie': login_response.cookies.get('access_token_cookie'),
                             'refresh_token_cookie': login_response.cookies.get('refresh_token_cookie')}
        response = requests.get(URL+'/users', cookies=self.auth_cookies)
        users = response.json().get('users', [])
        self.ids = [user.get('_id') for user in users]
    def test_create_message(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "Sender",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 200)
        inserted_id = response.json().get('message_id')
        get_response = requests.get(URL+'/messages/'+inserted_id, headers=headers, cookies=self.auth_cookies)
        message_data = get_response.json().get('message')
        self.assertEqual(get_response.status_code, 200)
        self.assertAlmostEqual(data['message'], message_data['message'])
        self.assertAlmostEqual(data['group_message'], message_data['group_message'])
        self.assertAlmostEqual(data['type'], message_data['type'])
        self.assertEqual(response.status_code, 200)
    def test_create_message_fail_bad_headers(self):
        headers = {}
        data = {
            "message": "TestMessage",
            "sender": "Sender",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_extra(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "extra": "extra",
            "message": "TestMessage",
            "sender": "Sender",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_no_message(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "sender": "Sender",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_no_sender(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            # "sender": "Sender",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_no_users(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "Sender",
            # "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_no_type(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "Sender",
            "users": self.ids,
            # "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_no_group_message(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "Sender",
            "users": self.ids,
            "type": 'fnf',
            # "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_short_message(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "T",
            "sender": "Sender",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_long_message(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": 1000*"a",
            "sender": "Sender",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_users_empty(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": 1000*"a",
            "sender": "Sender",
            "users": [],
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_sender_empty(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_type_empty(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "",
            "users": self.ids,
            "type": "",
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_type_wrong(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "",
            "users": self.ids,
            "type": "this is wrong",
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_group_message_wrong(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "This is wrong"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers, cookies=self.auth_cookies)
        self.assertEqual(response.status_code, 400)
    def test_create_message_fail_no_auth(self):
        headers = {'Content-type': 'application/json'}
        data = {
            "message": "TestMessage",
            "sender": "",
            "users": self.ids,
            "type": 'fnf',
            "group_message": "False"
        }
        response = requests.post(URL+'/messages', data=json.dumps(data), headers=headers)
        self.assertEqual(response.status_code, 401)
if __name__ == '__main__':
    unittest.main()
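Each create_message test above rebuilds the same five-key payload and varies one field. A payload factory would make the varied field explicit; a sketch (make_payload is a hypothetical helper, not in the original file):
def make_payload(ids, **overrides):
    # Default message payload used by the tests above; keyword overrides replace
    # a field, and a value of None drops it entirely.
    data = {"message": "TestMessage", "sender": "Sender", "users": ids,
            "type": "fnf", "group_message": "False"}
    for key, value in overrides.items():
        if value is None:
            data.pop(key, None)
        else:
            data[key] = value
    return data
For example, make_payload(self.ids, sender="") covers the empty-sender case and make_payload(self.ids, type=None) the missing-type case.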
| 38.412969
| 117
| 0.541093
| 1,078
| 11,255
| 5.461967
| 0.072356
| 0.069124
| 0.056046
| 0.074728
| 0.902683
| 0.894701
| 0.86481
| 0.857846
| 0.826257
| 0.814878
| 0
| 0.010642
| 0.323767
| 11,255
| 293
| 118
| 38.412969
| 0.762975
| 0.00693
| 0
| 0.722467
| 0
| 0
| 0.18373
| 0.005101
| 0
| 0
| 0
| 0
| 0.105727
| 1
| 0.092511
| false
| 0.008811
| 0.022026
| 0
| 0.123348
| 0.008811
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b1ee7deaa6e1a8726d4bce6d12ec2d939466da00
| 3,244
|
py
|
Python
|
migrations/versions/fc07e3fa0086_sql_compatibility.py
|
PaliPalo/burp-ui
|
affbed705f5b35a630ca1a96c01e6dea1bfbeddb
|
[
"BSD-3-Clause"
] | 93
|
2015-02-10T16:01:46.000Z
|
2021-12-02T21:21:42.000Z
|
migrations/versions/fc07e3fa0086_sql_compatibility.py
|
PaliPalo/burp-ui
|
affbed705f5b35a630ca1a96c01e6dea1bfbeddb
|
[
"BSD-3-Clause"
] | 5
|
2015-12-18T19:34:46.000Z
|
2021-09-17T14:18:10.000Z
|
migrations/versions/fc07e3fa0086_sql_compatibility.py
|
PaliPalo/burp-ui
|
affbed705f5b35a630ca1a96c01e6dea1bfbeddb
|
[
"BSD-3-Clause"
] | 17
|
2015-09-21T22:24:05.000Z
|
2021-10-01T14:28:47.000Z
|
"""sql compatibility
Revision ID: fc07e3fa0086
Revises: 7f317474332d
Create Date: 2017-01-30 16:09:53.367166
"""
# revision identifiers, used by Alembic.
revision = "fc07e3fa0086"
down_revision = "7f317474332d"
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("session", schema=None) as batch_op:
        batch_op.alter_column(
            "ip",
            existing_type=sa.VARCHAR(),
            type_=sa.String(length=256),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "ua",
            existing_type=sa.VARCHAR(),
            type_=sa.String(length=2048),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "user",
            existing_type=sa.VARCHAR(),
            type_=sa.String(length=256),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "uuid",
            existing_type=sa.VARCHAR(),
            type_=sa.String(length=256),
            existing_nullable=True,
        )
    with op.batch_alter_table("task", schema=None) as batch_op:
        batch_op.alter_column(
            "task",
            existing_type=sa.VARCHAR(),
            type_=sa.String(length=256),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "user",
            existing_type=sa.VARCHAR(),
            type_=sa.String(length=256),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "uuid",
            existing_type=sa.VARCHAR(),
            type_=sa.String(length=256),
            existing_nullable=True,
        )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("task", schema=None) as batch_op:
        batch_op.alter_column(
            "uuid",
            existing_type=sa.String(length=256),
            type_=sa.VARCHAR(),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "user",
            existing_type=sa.String(length=256),
            type_=sa.VARCHAR(),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "task",
            existing_type=sa.String(length=256),
            type_=sa.VARCHAR(),
            existing_nullable=True,
        )
    with op.batch_alter_table("session", schema=None) as batch_op:
        batch_op.alter_column(
            "uuid",
            existing_type=sa.String(length=256),
            type_=sa.VARCHAR(),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "user",
            existing_type=sa.String(length=256),
            type_=sa.VARCHAR(),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "ua",
            existing_type=sa.String(length=2048),
            type_=sa.VARCHAR(),
            existing_nullable=True,
        )
        batch_op.alter_column(
            "ip",
            existing_type=sa.String(length=256),
            type_=sa.VARCHAR(),
            existing_nullable=True,
        )
    ### end Alembic commands ###
| 27.726496
| 66
| 0.548397
| 340
| 3,244
| 4.988235
| 0.170588
| 0.099057
| 0.099057
| 0.148585
| 0.867925
| 0.856132
| 0.834906
| 0.834906
| 0.81309
| 0.81309
| 0
| 0.045923
| 0.34217
| 3,244
| 116
| 67
| 27.965517
| 0.748828
| 0.090937
| 0
| 0.765957
| 0
| 0
| 0.032236
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.021277
| 0
| 0.042553
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
593b135af51d7e9fb767a609e22f74b3ec449749
| 7,235
|
py
|
Python
|
pipelines/pet_clinic_pipeline.py
|
voidcontext/devops-in-practice-workshop
|
6284820fc25d478eea1ab3a5a917eebcc4115300
|
[
"Apache-2.0"
] | null | null | null |
pipelines/pet_clinic_pipeline.py
|
voidcontext/devops-in-practice-workshop
|
6284820fc25d478eea1ab3a5a917eebcc4115300
|
[
"Apache-2.0"
] | null | null | null |
pipelines/pet_clinic_pipeline.py
|
voidcontext/devops-in-practice-workshop
|
6284820fc25d478eea1ab3a5a917eebcc4115300
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from gomatic import *
import os, re
print("Updating PetClinic Pipeline...")
go_server_host = re.search(r'https?://([a-z0-9.\-._~%]+)', os.environ['GO_SERVER_URL']).group(1)
go_server_url = "%s:%s" % (go_server_host, "8153")
configurator = GoCdConfigurator(HostRestClient(go_server_url))
service_key={'GCLOUD_SERVICE_KEY': 'AES:lVCHjvAhBkxBVacvyO3L+Q==:F90aeeWmwfuk5PylUPjorZX9NkYkLXAls/bRfpTlFRsJVEiBvpPQuRNVlH3dSh8uVweDWC7MV8lsVfYA+ZPsMuWeB/DqwxxTVKPVFuxrh8MYO9z3LBctMhFxSlU9rFtKwbLSsDV8CtMYxm1+A6hQvLl/1rEP8dOQ7UE+zxM90sK9KIMHNRkGvuBiP7hEoatWo5v/bBXyVrGGK+V4H+DS5X+DRSwm0iir2b7y3WgI7AgJAYlg8FbAT+4utYGOpUeZMtYtSCbACnvaZYhWgl1xt9wSb2YxwFn3jzm62nutQE81HxOEoN5LeB02B5PxB92qOnu9Eh+eXmQT/bjNoPqYjeGLgTCGwlxlz6dw6hHo7J00WgdB23FhPp1sLByh2P40dth/TgOme2qpE+n4vunrf5kCNUMyKTjBL0DIb9nTnNXfsx3+cKTN4jyobV2G04Qp/vFC8yY/mRqq2JyIvbhmLObJBvCURttsNde69OsA4WJ5xHFS5YAOiINeiJNhF3EToPbCwRaUtM4QtVwHvKexGIXkKqkuUTlHqM5ydxX5l86aWHwrWU1Uet5rwhfpF6qeGn0gGX0AK+P2XBfxMN9hUqSkkxK0VGk3XyXfJJu4EmAQYet0BZSciQ23WJiS1fhoedLtqLgRZTD3mr6VD6jiwYEljp7tnFZinXEylc4JLeJlLmhLuXOCgr/wN5CWbJRW79YpPKmWdGfD2Z43dKna1rPIiyRwqQixQtxxE0z8uMkf1C2utk6AM5dZTYsnlf8QNdkCDmTecSKE88nHSRpT32qczQQzk2SntCH6HrTnq4ksmXgyQcUEFFWnPc5V3RyJBROUtxuZkq/5oh58GJ3VC7Z5w2JAChI+TJPiK+CtIYoknIto66JDwbofRVQ1OoDBhvrR6nU3PmOUapsDxmkJfXpNvlnepYoNoaw9n6VZCb/mu3SJItTbSrXZq1HFe+eOrV5Ovk2hfcS26l/wlAZLcbk/OLGQ6JKqhRgIvkK80nb6TFDsi3z30NfMDdYa5QlLPwh4WWtAAmS5U1E10vWuILjhL2h48LGwVtJyYvnE9AWDeUc9KlOwrcc4DiUuddi7I+gfHXbmQoaF7NOFGWJDs48JIF+CLut+WzKNGWvrd/y6PPXFssi2LHGIvqugHgceCECGzIwB49mH1hctRWgAcvyn+K6q0MJzA38r0FuTu5lKHtfG/I6mitA2y2Cs0L0SVIYg6XRUScBSq9BVCuqtobntBR8U6DYQETiyixRZbjn61DlbdHFaicI/o0+AGa31QhXOhPkP+cTKqEEHx/Az8gHa7KsCGEi5vdW/q8I5RY3k3u3sjl6SU5x3MHmnQlzwe2QTRBy++4nMyA+Bh35mpkoxK5Y2paGk8YYiSlVmdPFz/f6/C6MWzImgQ+xVFfhFQj3i2AagOosaFIxLndnbGqLmnxZyTG/Yv35VGcvDDcw5kqhbBCCKjNkMCYU5QHhH/eEfqFhFgUSpB9JNngitnnVY+bBYJTQEH3nHr8bk8pEX3hWgcuuEPemr+dKZ7Ig+lz1RPGQ3hlURtNaHywYS4AAMmpzdc5qYYRnhnDESScX29fVuNmV5zpKkadMTiE60OQ5PmIIs9edTFMi4IOMDqaIJuRC2p4AshrbsVwNZME4olQn4W56vtKTWG6jc8E+DSb0Y65SA1FItoaC8bYzpxWaEOWGgvA+uFu5XBTLGy5OlFZnhaieh6lIbvNmNltSJ2GnweTBn2g7EYWWXVYCmJUy+ZGxdSRyVWGvngvRR54E4/V+K94V+rHu4AgbMOozy4XaEVOOyi2AaZXTdNw6Dfub21NIYgsyo+GM/nd2eHizIOcEU+OQ4ll+BHE/x1HvM9OijfZQ78XX/ySIzRUDqcWxixZaqnl17sB7rXpdml96HSxG5Xsp0z9GJmlsMJMuh8EchewXEuUtIHi/CwjvTDD3guio5tm+qzRDHjoblMgvQpcGwHVAmYoTpNzXsOcBft74XOnF5VO5EC/xaJnFrtuUzyh0T6Zf6/SDwmn1vPXOI4qGwV09dH786wHRH++q4qnF/ElEmjLT1sliEKW94sW2FNTENV7HZqBoKdCr/63jB6DLdQXvFKRAZNgGIIMoJCjrLvUVn9Vz6o1PsReO+H+eLgQNG+wxTeF7PX9qkNHw3LkOc8L13ujg7OhhsFZI7OdYGD9GWkXfTDlMXGFZ6NfoYTOxkDbA4gkrSUrG33J3kPdw6GXV9ACnTzvFdY1eKdrlfeqI5Q/9rPnKimKch/9zCvVHbdXboILfwH4+tYSqjY0Yti4SQf+kJ5GiOK3NlEKcAnU7IvrBFJH71EkAifXRF+qR20VplMvvmcxqLFqc6XosJeKNc9fLxxlLbHCjRTL5Xqojs3EWqD6cUh4iJz9mPC7oo0iIg2yXiHp4nmS1TfVlRrUAVcQENaq8tL9rN7lfrdPVSFvoe/ippowxBl71+IrJajnsTLH25C59y9cmGJDU/XYRU6prRazypYz5ukl2O18JcrO/FIzkz1AwuCo93wzoO7wZN8FSfXJnaKYM4glHgV71yIvUlrphNb8teF5GjjLjjiJOP0IYVFNXYuhpwXK2RqeZyTetxUPy5zqrbj5gygzGeVq9qhTt7y+cW9hKMCydHLJKC3CQkADweHvrGBVculv9aC4XXDdLCgk/Khd06JFYHXgHV9UhfB8m8TwdGeI8fIsuHiOH3nWaT4zF2JqK4PDFA/OslU9S7BO6hVT2yo64bwGEJPOMcVriH9S0H+k4QPfW7C0RWI5J+cttT1MdWuYVKYkStzTj+xQZZw310POed0z9lISZne/HT4DYSXFgI8d78EB55D2O3K9zE8wvhiOa6bbId+LcPgeexOu86vXmsAwMSzmSvTyQKd49sUzTQGNSZ+d7T2dZH3U3mJcLi6Li/Co3H5YH0skE+MgId14lnc5V0vx1Bqui+85fd6RwpWCcrgu79KyNg+0s/0TEL+NBYb+zDe33fN/ESC4VUzGjWfYWmhG69uubeB1ViTFpUWqfi99d+W9/NsYfQegzLnnG0IJvacD6C66Yklc5wKObxRLtib1E4YuuHDE8xWREtsErP3xV4LDXMct+2omFfapzSU3L/YT4lFexrkzsJj5x2IEvQ9H+qd4kY5yxAZ3NEnq5BGC9xrDTLRIYUrNVZ3j8mBhnYfL9hzdb5ZTqMhOJmB6zA26LtKxv7Ec910iOfvt2kX8YlxhbZUuML2IuqMmsK80T1kbeqEyDQLLTUYXVGWQkDqiLjKj6felsp98iTCQj7GaKEL9itw6o1iKB9MNleZ7ngSq+IiJxQ/ktCeBrWtdkDQ54C8r4xYUachhrqc4yJIdxHo0Br979ki0L2VTAEfuJFfTMhU4xBD6OypZu5xwHmeCJqN98RUN/uxPa2Ufj9271UmFJ90bIwqjf/y5bcfm5uc1pHLIUUiBaK5TypbZQ0/8YH5isDhQSebq96iARHqKcCFo+r6EyiQMSSQTW/BuW1wgKV8FAI1x2DqnHh1vBVf5HSBGrg8t7iZsktQsYaNBGjvY3O57qMgTAjxc8voItKikEsWGPZqAiXZ/GlQNFihrqtxKMoSH72WUcZYlGDU+OTbxMowp0uN2hPAwzsuZQJTTchccf/APZSQom8g5pEVtzJyHgyz8Acl1eu3uDNmwrP82WyTl/5M9/w+S1MXpLqbPxx3xME31HYHqe8zqhXhLTUu54RsRVXzv3/WMwTgOBz7BATM1o17Bopi/te1tyF3bjP/s2Qd8XhG628zAgdkCB9I94EdQ28e4m5Kd1EbsQ3TIgstK+FoOi7EuXnYzZTSzgXHEO5U8V/Q2JEC+NjNO7XcxILxNJ7AdNRJvnSWGRXkU2TNdK/5SLlnGIpBQe/yJ8fJE3vCuBxzEHHJslOsDDs6X6043URdfRl7r0C0gmep1PJvCZUdDmnxZ0fixx5AIm2+Jd4u4xVKS2kfGCGRvmgcZXeOPmmVmKWCUaRlKWAB1W0jtU1ItFwW0cQ6W7LZCgdReMggHv4ZskP+4u92amgrpeHs2BRsQ61BaDFInhp87SkdzUu9wRNv5MLb/Pg4IJGFCgJd55rFD4f6dwpTyfwi5TErfacYyyH5YHZtW2E3dVuLjZUlgQFjczR7xeM+PteLEjizg4Xb+E3XnU2tk0e29z0qqNtKv21d9oCTepTPrKeLQ=='}
pipeline = configurator\
    .ensure_pipeline_group("sample")\
    .ensure_replacement_of_pipeline("PetClinic")\
    .set_git_material(GitMaterial("https://github.com/voidcontext/devops-in-practice-workshop.git", ignore_patterns=set(['pipelines/*']))).ensure_environment_variables({'GCLOUD_PROJECT_ID': 'devops-workshop-gabor'}).ensure_encrypted_environment_variables(service_key)
stage = pipeline.ensure_stage("commit")
job = stage.ensure_job("build-and-publish").ensure_environment_variables({'MAVEN_OPTS': '-Xmx1024m'}).set_elastic_profile_id("docker-jdk")
job.add_task(ExecTask(['./mvnw', 'clean package']))
job.add_task(ExecTask(['bash', '-c', 'docker build --tag pet-app:$GO_PIPELINE_LABEL --build-arg JAR_FILE=target/spring-petclinic-2.0.0.BUILD-SNAPSHOT.jar .']))
job.add_task(ExecTask(['bash', '-c', 'docker login -u _json_key -p"$(echo $GCLOUD_SERVICE_KEY | base64 -d)" https://us.gcr.io']))
job.add_task(ExecTask(['bash', '-c', 'docker tag pet-app:$GO_PIPELINE_LABEL us.gcr.io/$GCLOUD_PROJECT_ID/pet-app:$GO_PIPELINE_LABEL']))
job.add_task(ExecTask(['bash', '-c', 'docker push us.gcr.io/$GCLOUD_PROJECT_ID/pet-app:$GO_PIPELINE_LABEL']))
stage = pipeline.ensure_stage("deploy")
job = stage.ensure_job("deploy").ensure_environment_variables({'GCLOUD_ZONE': 'us-central1-a', 'GCLOUD_CLUSTER': 'devops-workshop-gke'}).set_elastic_profile_id("kubectl")
job.add_task(ExecTask(['bash', '-c', 'echo $GCLOUD_SERVICE_KEY | base64 -d > secret.json && chmod 600 secret.json']))
job.add_task(ExecTask(['bash', '-c', 'gcloud auth activate-service-account --key-file secret.json']))
job.add_task(ExecTask(['bash', '-c', 'gcloud container clusters get-credentials $GCLOUD_CLUSTER --zone $GCLOUD_ZONE --project $GCLOUD_PROJECT_ID']))
job.add_task(ExecTask(['bash', './deploy.sh']))
job.add_task(ExecTask(['bash', '-c', 'rm secret.json']))
stage = pipeline.ensure_stage("approve-canary")
stage.set_has_manual_approval()
job = stage \
    .ensure_job("complete-canary") \
    .ensure_environment_variables({'GCLOUD_ZONE': 'us-central1-a', 'GCLOUD_PROJECT_ID': 'devops-workshop-gabor', 'GCLOUD_CLUSTER': 'devops-workshop-gke'})
job.set_elastic_profile_id('kubectl')
job.add_task(ExecTask(['bash', '-c', 'echo $GCLOUD_SERVICE_KEY | base64 -d > secret.json && chmod 600 secret.json']))
job.add_task(ExecTask(['bash', '-c', 'gcloud auth activate-service-account --key-file secret.json']))
job.add_task(ExecTask(['bash', '-c', 'gcloud container clusters get-credentials $GCLOUD_CLUSTER --zone $GCLOUD_ZONE --project $GCLOUD_PROJECT_ID']))
job.add_task(ExecTask(['bash', '-c', './complete-canary.sh']))
job.add_task(ExecTask(['bash', '-c', 'rm secret.json']))
configurator.save_updated_config()
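The AES-encrypted service key above is pasted into the script as a literal; reading it from the agent's environment would keep the pipeline definition itself free of secrets. A sketch (the GCLOUD_SERVICE_KEY_AES variable name is hypothetical):
import os
# Same encrypted value as above, supplied via the GoCD agent environment instead:
service_key = {'GCLOUD_SERVICE_KEY': os.environ['GCLOUD_SERVICE_KEY_AES']}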
| 168.255814
| 4,251
| 0.873117
| 561
| 7,235
| 11.069519
| 0.461676
| 0.014493
| 0.024155
| 0.043478
| 0.191304
| 0.182609
| 0.160386
| 0.141707
| 0.141707
| 0.124638
| 0
| 0.096733
| 0.026952
| 7,235
| 42
| 4,252
| 172.261905
| 0.785369
| 0.002764
| 0
| 0.216216
| 0
| 0.081081
| 0.789992
| 0.630579
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.054054
| null | null | 0.027027
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3caa254c21eb19940faeed87fd0149d55352adcc
| 66,990
|
py
|
Python
|
solutions/130_surrounded_regions.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
solutions/130_surrounded_regions.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
solutions/130_surrounded_regions.py
|
abawchen/leetcode
|
41d3b172a7694a46a860fbcb0565a3acccd000f2
|
[
"MIT"
] | null | null | null |
class Solution:
    # @param {character[][]} board
    # @return {void} Do not return anything, modify board in-place instead.
    # NOTE: Python 2 code (tuple parameters, xrange, dict.has_key).
    def solve(self, board):
        if not board or not board[0]:
            return
        self.dic = {}
        self.rows = len(board)
        self.columns = len(board[0])
        self.directions = [(-1, 0), (0, -1), (1, 0), (0, 1)]
        # check is True for an 'O' cell not yet marked as border-connected.
        check = lambda x, y: board[x][y] == 'O' and not self.dic.has_key((x, y))
        for r in [0, self.rows-1]:
            for (x, y) in [(r, n) for n in xrange(self.columns)]:
                if check(x, y):
                    self._dfs(board, (x, y))
        for c in [0, self.columns-1]:
            for (x, y) in [(n, c) for n in xrange(self.rows)]:
                if check(x, y):
                    self._dfs(board, (x, y))
        for x in xrange(self.rows):
            s = ""
            for y in xrange(self.columns):
                s += 'X' if board[x][y] == 'X' or check(x, y) else 'O'
            board[x] = s
    def _dfs(self, board, (x, y)):
        # Iterative DFS: mark the whole 'O' region reachable from (x, y) as border-connected.
        stack = [(x, y)]
        self.dic[(x, y)] = False
        while stack:
            (x, y) = stack.pop()
            for (a, b) in map(lambda (i, j): (x+i, y+j), self.directions):
                if a >= 0 and a < self.rows and b >= 0 and b < self.columns and board[a][b] == 'O' and not self.dic.has_key((a, b)):
                    stack.append((a, b))
                    self.dic[(a, b)] = False
    # def _dfs(self, board, (x, y)):
    #     self.dic[(x, y)] = False
    #     for (a, b) in map(lambda (i, j): (x+i, y+j), self.directions):
    #         if a >= 0 and a < self.rows and b >= 0 and b < self.columns and board[a][b] == 'O' and not self.dic.has_key((a, b)):
    #             self._dfs(board, (a, b))
s = Solution()
# s.solve(None)
# s.solve([])
# s.solve([[]])
# s.solve(['O'])
# s.solve(['X'])
# s.solve(['OX', 'XO'])
# s.solve(['XX', 'XO'])
s.solve(['XXX', 'XOX', 'XXX'])
# s.solve([
# 'XXXX',
# 'XOOX',
# 'XXOX',
# 'XOXX'
# ])
s.solve([
    'XXXOX',
    'XOOXX',
    'XXOOX',
    'XOXXX'
])
# s.solve([
# 'XXXOX',
# 'XOOXX',
# 'XXOOO',
# 'XXOXX'
# ])
# s.solve([
# 'XXXOX',
# 'XOOXX',
# 'XXOOO',
# 'XXXXX'
# ])
# s.solve([
# 'XXXXX',
# 'XOXOX',
# 'XXOXX',
# 'OOOOX',
# 'XXXXX',
# ])
# s.solve([
# 'XXXXX',
# 'XOXOX',
# 'XXOXX',
# 'XOOOX',
# 'XXXXO',
# ])
# s.solve([
# "XOXXX",
# "XOXOX",
# "XXOXX",
# "OOOOX",
# "XXXXX"
# ])
# s.solve([
# "XOXXXXXXX",
# "XOXOXXXXX",
# "XXOXXXXXX",
# "XOOOXXXXX",
# ])
board = ["OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXO","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","OXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXO","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","OXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXO","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","OXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXO","OOO
OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","OXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXO","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","OXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXO","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","OXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXO","OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO","OXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX","OOOOOOOOOOOOOOOO
...(elided: the remainder of a very large test-board literal, hundreds of ~250-character strings of 'O' and 'X' cells)..."]
# Commented-out earlier attempt: per-cell memoized DFS (modernized here to
# valid Python 3 syntax; kept disabled, as in the source).
# print(len(board), len(board[0]))
# for b in board:
#     print(b)
# s.solve(board)
# self.dic = {}
# self.rows = len(board)
# self.columns = len(board[0])
# self.directions = [(-1, 0), (0, -1), (1, 0), (0, 1)]
# for i in range(self.rows):
#     for j in range(self.columns):
#         if board[i][j] == 'O' and (i, j) not in self.dic:
#             self._dfs(board, i, j)
# print(self.dic)
# print(board)
# def _dfs(self, board, i, j):
#     if i in (0, self.rows - 1) or j in (0, self.columns - 1):
#         # Border cell: this region can never be captured.
#         self.dic[(i, j)] = False
#     else:
#         self.dic[(i, j)] = True
#         for x, y in self.directions:
#             if board[i + x][j + y] == 'O':
#                 self.dic[(i, j)] &= self.dic[(i + x, j + y)] if (i + x, j + y) in self.dic else self._dfs(board, i + x, j + y)
#                 if not self.dic[(i, j)]:
#                     break
#     if self.dic[(i, j)]:
#         board[i][j] = 'X'
#     return self.dic[(i, j)]
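# A minimal runnable sketch of the same "Surrounded Regions" idea in
# Python 3, for clarity. This is an illustrative reconstruction, not the
# file's original code: the `Solution`/`solve` names follow the usual
# LeetCode convention and are assumptions here. Instead of memoizing per
# cell, it walks from the border 'O' cells, which avoids the deep mutual
# recursion of the commented sketch above.
class Solution:
    def solve(self, board):
        # `board` is assumed to be a list of lists of 'X'/'O' characters,
        # mutated in place (the usual LeetCode contract).
        if not board or not board[0]:
            return
        rows, cols = len(board), len(board[0])

        def mark_safe(i, j):
            # Iterative DFS from a border cell: every 'O' reachable from
            # here can never be captured.
            stack = [(i, j)]
            while stack:
                x, y = stack.pop()
                if 0 <= x < rows and 0 <= y < cols and board[x][y] == 'O':
                    board[x][y] = 'S'  # temporary "safe" marker
                    stack.extend([(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)])

        # Any 'O' on the border (and anything connected to it) survives.
        for i in range(rows):
            mark_safe(i, 0)
            mark_safe(i, cols - 1)
        for j in range(cols):
            mark_safe(0, j)
            mark_safe(rows - 1, j)

        # Everything else is surrounded: flip it; restore the safe cells.
        for i in range(rows):
            for j in range(cols):
                board[i][j] = 'O' if board[i][j] == 'S' else 'X'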
| 408.47561
| 63,259
| 0.956531
| 770
| 66,990
| 83.201299
| 0.107792
| 0.491688
| 0.725825
| 0.967767
| 0.987872
| 0.984578
| 0.982783
| 0.981659
| 0.981659
| 0.981659
| 0
| 0.000488
| 0.020511
| 66,990
| 164
| 63,260
| 408.47561
| 0.975875
| 0.0289
| 0
| 0.1
| 0
| 0
| 0.962535
| 0.962012
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 16
|
3cc83fce90a8128f0cdda03fd0253115fdc6e30b
| 50,939
|
py
|
Python
|
pyredis/commands.py
|
schlitzered/pyredis
|
4f5049aae7ae2702f22ccfb661ca3f13869a3c31
|
[
"MIT"
] | 43
|
2015-12-15T12:39:18.000Z
|
2021-05-13T08:52:26.000Z
|
pyredis/commands.py
|
schlitzered/pyredis
|
4f5049aae7ae2702f22ccfb661ca3f13869a3c31
|
[
"MIT"
] | 7
|
2016-03-07T14:16:33.000Z
|
2019-10-17T15:24:23.000Z
|
pyredis/commands.py
|
schlitzered/pyredis
|
4f5049aae7ae2702f22ccfb661ca3f13869a3c31
|
[
"MIT"
] | 15
|
2015-11-19T03:22:26.000Z
|
2021-10-16T15:29:56.000Z
|
__author__ = 'schlitzer'
__all__ = [
'Connection',
'Hash',
'HyperLogLog',
'Key',
'List',
'Publish',
'Scripting',
'Set',
'SSet',
'String',
'Subscribe',
'Transaction'
]
class BaseCommand(object):
def __init__(self):
self._cluster = False
def execute(self, *args, **kwargs):
        raise NotImplementedError
class Connection(BaseCommand):
def __init__(self):
super().__init__()
def echo(self, *args, shard_key=None, sock=None):
""" Execute ECHO Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the key name you try to work with.
Can not be used if sock is set.
            Only used with a Cluster Client
:type shard_key: string
:param sock: (optional)
The string representation of a socket, the command should be executed against.
For example: "testhost_6379"
Can not be used if shard_key is set.
            Only used with a Cluster Client
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'ECHO', *args, shard_key=shard_key, sock=sock)
return self.execute(b'ECHO', *args)
def ping(self, shard_key=None, sock=None):
""" Execute PING Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the key name you try to work with.
Can not be used if sock is set.
            Only used with a Cluster Client
:type shard_key: string
:param sock: (optional)
The string representation of a socket, the command should be executed against.
For example: "testhost_6379"
Can not be used if shard_key is set.
            Only used with a Cluster Client
:type sock: string
        :return: result, exception
"""
if self._cluster:
return self.execute(b'PING', shard_key=shard_key, sock=sock)
return self.execute(b'PING')
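# A hedged usage sketch of the dispatch pattern above: each command mixin
# only builds the argument list and calls self.execute(<command bytes>, ...);
# a concrete client is expected to provide execute(). MiniClient below is a
# hypothetical stand-in (pyredis's real clients live in other modules) that
# simply returns what would be sent, to show how _cluster changes the call.
class MiniClient(Connection):
    def __init__(self, cluster=False):
        super().__init__()
        self._cluster = cluster

    def execute(self, *args, **kwargs):
        # A real client would serialize args to the Redis wire protocol;
        # here we just surface the command and any cluster routing hints.
        return args, kwargs

# Usage (hypothetical):
#   MiniClient().ping()
#       -> ((b'PING',), {})
#   MiniClient(cluster=True).ping(sock='testhost_6379')
#       -> ((b'PING',), {'shard_key': None, 'sock': 'testhost_6379'})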
class Geo(BaseCommand):
def __init__(self):
super().__init__()
def geoadd(self, *args):
""" Execute GEOADD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEOADD', *args, shard_key=args[0])
return self.execute(b'GEOADD', *args)
def geodist(self, *args):
""" Execute GEODIST Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEODIST', *args, shard_key=args[0])
return self.execute(b'GEODIST', *args)
def geohash(self, *args):
""" Execute GEOHASH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEOHASH', *args, shard_key=args[0])
return self.execute(b'GEOHASH', *args)
def georadius(self, *args):
""" Execute GEORADIUS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEORADIUS', *args, shard_key=args[0])
return self.execute(b'GEORADIUS', *args)
def geopos(self, *args):
""" Execute GEOPOS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEOPOS', *args, shard_key=args[0])
return self.execute(b'GEOPOS', *args)
def georadiusbymember(self, *args):
""" Execute GEORADIUSBYMEMBER Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GEORADIUSBYMEMBER', *args, shard_key=args[0])
return self.execute(b'GEORADIUSBYMEMBER', *args)
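# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Geo commands operate on a sorted set of (longitude, latitude, member)
# triples. Because the wrappers above take raw *args, each call maps
# one-to-one onto the Redis wire command, e.g. with a hypothetical client:
#
#   client.geoadd('fleet', 13.361389, 38.115556, 'truck-1')
#   client.geodist('fleet', 'truck-1', 'truck-2', 'km')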
class Key(BaseCommand):
def __init__(self):
super().__init__()
def delete(self, *args):
""" Execute DEL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DEL', *args, shard_key=args[0])
return self.execute(b'DEL', *args)
def dump(self, *args):
""" Execute DUMP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DUMP', *args, shard_key=args[0])
return self.execute(b'DUMP', *args)
def exists(self, *args):
""" Execute EXISTS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'EXISTS', *args, shard_key=args[0])
return self.execute(b'EXISTS', *args)
def expire(self, *args):
""" Execute EXPIRE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'EXPIRE', *args, shard_key=args[0])
return self.execute(b'EXPIRE', *args)
def expireat(self, *args):
""" Execute EXPIREAT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'EXPIREAT', *args, shard_key=args[0])
return self.execute(b'EXPIREAT', *args)
def keys(self, *args, shard_key=None, sock=None):
""" Execute KEYS Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'KEYS', *args, shard_key=shard_key, sock=sock)
return self.execute(b'KEYS', *args)
def migrate(self, *args):
""" Execute MIGRATE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
raise NotImplementedError
return self.execute(b'MIGRATE', *args)
def move(self, *args):
""" Execute MOVE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MOVE', *args, shard_key=args[0])
return self.execute(b'MOVE', *args)
def object(self, *args, shard_key=None, sock=None):
""" Execute OBJECT Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'OBJECT', *args, shard_key=shard_key, sock=sock)
return self.execute(b'OBJECT', *args)
def persist(self, *args):
""" Execute PERSIST Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PERSIST', *args, shard_key=args[0])
return self.execute(b'PERSIST', *args)
def pexpire(self, *args):
""" Execute PEXPIRE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PEXPIRE', *args, shard_key=args[0])
return self.execute(b'PEXPIRE', *args)
def pexpireat(self, *args):
""" Execute PEXPIREAT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PEXPIREAT', *args, shard_key=args[0])
return self.execute(b'PEXPIREAT', *args)
def pttl(self, *args):
""" Execute PTTL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PTTL', *args, shard_key=args[0])
return self.execute(b'PTTL', *args)
def randomkey(self, *args, shard_key=None, sock=None):
""" Execute RANDOMKEY Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'RANDOMKEY', *args, shard_key=shard_key, sock=sock)
return self.execute(b'RANDOMKEY', *args)
def rename(self, *args):
""" Execute RENAME Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RENAME', *args, shard_key=args[0])
return self.execute(b'RENAME', *args)
def renamenx(self, *args):
""" Execute RENAMENX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RENAMENX', *args, shard_key=args[0])
return self.execute(b'RENAMENX', *args)
def restore(self, *args):
""" Execute RESTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RESTORE', *args, shard_key=args[0])
return self.execute(b'RESTORE', *args)
def scan(self, *args, shard_key=None, sock=None):
""" Execute SCAN Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCAN', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCAN', *args)
def sort(self, *args):
""" Execute SORT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SORT', *args, shard_key=args[0])
return self.execute(b'SORT', *args)
def ttl(self, *args):
""" Execute TTL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'TTL', *args, shard_key=args[0])
return self.execute(b'TTL', *args)
def type(self, *args):
""" Execute TYPE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'TYPE', *args, shard_key=args[0])
return self.execute(b'TYPE', *args)
def wait(self, *args):
""" Execute WAIT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'WAIT', *args, shard_key=args[0])
return self.execute(b'WAIT', *args)
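# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Every single-key method above forwards shard_key=args[0], i.e. the key name,
# when running against a cluster. Redis Cluster assigns each key to one of
# 16384 hash slots via CRC16(key) % 16384 (hash-tag handling omitted here), so
# a cluster client can use the shard key to pick the node that owns the slot:
import binascii

def _slot_for_key(key: bytes) -> int:
    # binascii.crc_hqx with an initial value of 0 computes CRC-16/XMODEM, the
    # same checksum Redis Cluster uses for slot assignment.
    return binascii.crc_hqx(key, 0) % 16384

# _slot_for_key(b'user:1000') is the slot that DEL/EXPIRE/TTL on that key
# would be routed by.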
class String(BaseCommand):
def __init__(self):
super().__init__()
def append(self, *args):
""" Execute APPEND Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'APPEND', *args, shard_key=args[0])
return self.execute(b'APPEND', *args)
def bitcount(self, *args):
""" Execute BITCOUNT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BITCOUNT', *args, shard_key=args[0])
return self.execute(b'BITCOUNT', *args)
def bitfield(self, *args):
""" Execute BITFIELD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BITFIELD', *args, shard_key=args[0])
return self.execute(b'BITFIELD', *args)
def bitop(self, *args):
""" Execute BITOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BITOP', *args, shard_key=args[1])
return self.execute(b'BITOP', *args)
def bitpos(self, *args):
""" Execute BITPOS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BITPOS', *args, shard_key=args[0])
return self.execute(b'BITPOS', *args)
def decr(self, *args):
""" Execute DECR Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DECR', *args, shard_key=args[0])
return self.execute(b'DECR', *args)
def decrby(self, *args):
""" Execute DECRBY Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DECRBY', *args, shard_key=args[0])
return self.execute(b'DECRBY', *args)
def get(self, *args):
""" Execute GET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GET', *args, shard_key=args[0])
return self.execute(b'GET', *args)
def getbit(self, *args):
""" Execute GETBIT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GETBIT', *args, shard_key=args[0])
return self.execute(b'GETBIT', *args)
def getrange(self, *args):
""" Execute GETRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GETRANGE', *args, shard_key=args[0])
return self.execute(b'GETRANGE', *args)
def getset(self, *args):
""" Execute GETSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'GETSET', *args, shard_key=args[0])
return self.execute(b'GETSET', *args)
def incr(self, *args):
""" Execute INCR Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'INCR', *args, shard_key=args[0])
return self.execute(b'INCR', *args)
def incrby(self, *args):
""" Execute INCRBY Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'INCRBY', *args, shard_key=args[0])
return self.execute(b'INCRBY', *args)
def incrbyfloat(self, *args):
""" Execute INCRBYFLOAT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'INCRBYFLOAT', *args, shard_key=args[0])
return self.execute(b'INCRBYFLOAT', *args)
def mget(self, *args):
""" Execute MGET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MGET', *args, shard_key=args[0])
return self.execute(b'MGET', *args)
def mset(self, *args):
""" Execute MSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MSET', *args, shard_key=args[0])
return self.execute(b'MSET', *args)
def msetnx(self, *args):
""" Execute MSETNX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MSETNX', *args, shard_key=args[0])
return self.execute(b'MSETNX', *args)
def psetex(self, *args):
""" Execute PSETEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PSETEX', *args, shard_key=args[0])
return self.execute(b'PSETEX', *args)
def set(self, *args):
""" Execute SET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SET', *args, shard_key=args[0])
return self.execute(b'SET', *args)
def setbit(self, *args):
""" Execute SETBIT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SETBIT', *args, shard_key=args[0])
return self.execute(b'SETBIT', *args)
def setex(self, *args):
""" Execute SETEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SETEX', *args, shard_key=args[0])
return self.execute(b'SETEX', *args)
def setnx(self, *args):
""" Execute SETNX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SETNX', *args, shard_key=args[0])
return self.execute(b'SETNX', *args)
def setrange(self, *args):
""" Execute SETRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SETRANGE', *args, shard_key=args[0])
return self.execute(b'SETRANGE', *args)
def strlen(self, *args):
""" Execute STRLEN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'STRLEN', *args, shard_key=args[0])
return self.execute(b'STRLEN', *args)
class Hash(BaseCommand):
def __init__(self):
super().__init__()
def hdel(self, *args):
""" Execute HDEL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HDEL', *args, shard_key=args[0])
return self.execute(b'HDEL', *args)
def hexists(self, *args):
""" Execute HEXISTS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HEXISTS', *args, shard_key=args[0])
return self.execute(b'HEXISTS', *args)
def hget(self, *args):
""" Execute HGET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HGET', *args, shard_key=args[0])
return self.execute(b'HGET', *args)
def hgetall(self, *args):
""" Execute HGETALL Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HGETALL', *args, shard_key=args[0])
return self.execute(b'HGETALL', *args)
def hincrby(self, *args):
""" Execute HINCRBY Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HINCRBY', *args, shard_key=args[0])
return self.execute(b'HINCRBY', *args)
def hincrbyfloat(self, *args):
""" Execute HINCRBYFLOAT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HINCRBYFLOAT', *args, shard_key=args[0])
return self.execute(b'HINCRBYFLOAT', *args)
def hkeys(self, *args):
""" Execute HKEYS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HKEYS', *args, shard_key=args[0])
return self.execute(b'HKEYS', *args)
def hlen(self, *args):
""" Execute HLEN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HLEN', *args, shard_key=args[0])
return self.execute(b'HLEN', *args)
def hmget(self, *args):
""" Execute HMGET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HMGET', *args, shard_key=args[0])
return self.execute(b'HMGET', *args)
def hmset(self, *args):
""" Execute HMSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HMSET', *args, shard_key=args[0])
return self.execute(b'HMSET', *args)
def hset(self, *args):
""" Execute HSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HSET', *args, shard_key=args[0])
return self.execute(b'HSET', *args)
def hsetnx(self, *args):
""" Execute HSETNX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HSETNX', *args, shard_key=args[0])
return self.execute(b'HSETNX', *args)
def hstrlen(self, *args):
""" Execute HSTRLEN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HSTRLEN', *args, shard_key=args[0])
return self.execute(b'HSTRLEN', *args)
def hvals(self, *args):
""" Execute HVALS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HVALS', *args, shard_key=args[0])
return self.execute(b'HVALS', *args)
def hscan(self, *args):
""" Execute HSCAN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'HSCAN', *args, shard_key=args[0])
return self.execute(b'HSCAN', *args)
class List(BaseCommand):
def __init__(self):
super().__init__()
def blpop(self, *args):
""" Execute BLPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BLPOP', *args, shard_key=args[0])
return self.execute(b'BLPOP', *args)
def brpop(self, *args):
""" Execute BRPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BRPOP', *args, shard_key=args[0])
return self.execute(b'BRPOP', *args)
def brpoplpush(self, *args):
""" Execute BRPOPPUSH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'BRPOPPUSH', *args, shard_key=args[0])
return self.execute(b'BRPOPPUSH', *args)
def lindex(self, *args):
""" Execute LINDEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LINDEX', *args, shard_key=args[0])
return self.execute(b'LINDEX', *args)
def linsert(self, *args):
""" Execute LINSERT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LINSERT', *args, shard_key=args[0])
return self.execute(b'LINSERT', *args)
def llen(self, *args):
""" Execute LLEN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LLEN', *args, shard_key=args[0])
return self.execute(b'LLEN', *args)
def lpop(self, *args):
""" Execute LPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LPOP', *args, shard_key=args[0])
return self.execute(b'LPOP', *args)
def lpush(self, *args):
""" Execute LPUSH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LPUSH', *args, shard_key=args[0])
return self.execute(b'LPUSH', *args)
def lpushx(self, *args):
""" Execute LPUSHX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LPUSHX', *args, shard_key=args[0])
return self.execute(b'LPUSHX', *args)
def lrange(self, *args):
""" Execute LRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LRANGE', *args, shard_key=args[0])
return self.execute(b'LRANGE', *args)
def lrem(self, *args):
""" Execute LREM Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LREM', *args, shard_key=args[0])
return self.execute(b'LREM', *args)
def lset(self, *args):
""" Execute LSET Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LSET', *args, shard_key=args[0])
return self.execute(b'LSET', *args)
def ltrim(self, *args):
""" Execute LTRIM Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'LTRIM', *args, shard_key=args[0])
return self.execute(b'LTRIM', *args)
def rpop(self, *args):
""" Execute RPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RPOP', *args, shard_key=args[0])
return self.execute(b'RPOP', *args)
def rpoplpush(self, *args):
""" Execute RPOPLPUSH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RPOPLPUSH', *args, shard_key=args[0])
return self.execute(b'RPOPLPUSH', *args)
def rpush(self, *args):
""" Execute RPUSH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RPUSH', *args, shard_key=args[0])
return self.execute(b'RPUSH', *args)
def rpushx(self, *args):
""" Execute RPUSHX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'RPUSHX', *args, shard_key=args[0])
return self.execute(b'RPUSHX', *args)
class Set(BaseCommand):
def __init__(self):
super().__init__()
def sadd(self, *args):
""" Execute SADD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SADD', *args, shard_key=args[0])
return self.execute(b'SADD', *args)
def scard(self, *args):
""" Execute SCARD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCARD', *args, shard_key=args[0])
return self.execute(b'SCARD', *args)
def sdiff(self, *args):
""" Execute SDIFF Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SDIFF', *args, shard_key=args[0])
return self.execute(b'SDIFF', *args)
def sdiffstore(self, *args):
""" Execute SDIFFSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SDIFFSTORE', *args, shard_key=args[0])
return self.execute(b'SDIFFSTORE', *args)
def sinter(self, *args):
""" Execute SINTER Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SINTER', *args, shard_key=args[0])
return self.execute(b'SINTER', *args)
def sinterstore(self, *args):
""" Execute SINTERSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SINTERSTORE', *args, shard_key=args[0])
return self.execute(b'SINTERSTORE', *args)
def sismember(self, *args):
""" Execute SISMEMBER Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SISMEMBER', *args, shard_key=args[0])
return self.execute(b'SISMEMBER', *args)
def smembers(self, *args):
""" Execute SMEMBERS Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SMEMBERS', *args, shard_key=args[0])
return self.execute(b'SMEMBERS', *args)
def smove(self, *args):
""" Execute SMOVE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SMOVE', *args, shard_key=args[0])
return self.execute(b'SMOVE', *args)
def spop(self, *args):
""" Execute SPOP Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SPOP', *args, shard_key=args[0])
return self.execute(b'SPOP', *args)
def srandmember(self, *args):
""" Execute SRANDMEMBER Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SRANDMEMBER', *args, shard_key=args[0])
return self.execute(b'SRANDMEMBER', *args)
def srem(self, *args):
""" Execute SREM Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SREM', *args, shard_key=args[0])
return self.execute(b'SREM', *args)
def sunion(self, *args):
""" Execute SUNION Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SUNION', *args, shard_key=args[0])
return self.execute(b'SUNION', *args)
def sunionstore(self, *args):
""" Execute SUNIONSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SUNIONSTORE', *args, shard_key=args[0])
return self.execute(b'SUNIONSTORE', *args)
def sscan(self, *args):
""" Execute SSCAN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'SSCAN', *args, shard_key=args[0])
return self.execute(b'SSCAN', *args)
class SSet(BaseCommand):
def __init__(self):
super().__init__()
def zadd(self, *args):
""" Execute ZADD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZADD', *args, shard_key=args[0])
return self.execute(b'ZADD', *args)
def zcard(self, *args):
""" Execute ZCARD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZCARD', *args, shard_key=args[0])
return self.execute(b'ZCARD', *args)
def zcount(self, *args):
""" Execute ZCOUNT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZCOUNT', *args, shard_key=args[0])
return self.execute(b'ZCOUNT', *args)
def zincrby(self, *args):
""" Execute ZINCRBY Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZINCRBY', *args, shard_key=args[0])
return self.execute(b'ZINCRBY', *args)
def zinterstore(self, *args):
""" Execute ZINTERSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZINTERSTORE', *args, shard_key=args[0])
return self.execute(b'ZINTERSTORE', *args)
def zlexcount(self, *args):
""" Execute ZLEXCOUNT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZLEXCOUNT', *args, shard_key=args[0])
return self.execute(b'ZLEXCOUNT', *args)
def zrange(self, *args):
""" Execute ZRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZRANGE', *args, shard_key=args[0])
return self.execute(b'ZRANGE', *args)
def zrangebylex(self, *args):
""" Execute ZRANGEBYLEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZRANGEBYLEX', *args, shard_key=args[0])
return self.execute(b'ZRANGEBYLEX', *args)
def zrangebyscore(self, *args):
""" Execute ZRANGEBYSCORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZRANGEBYSCORE', *args, shard_key=args[0])
return self.execute(b'ZRANGEBYSCORE', *args)
def zrank(self, *args):
""" Execute ZRANK Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZRANK', *args, shard_key=args[0])
return self.execute(b'ZRANK', *args)
def zrem(self, *args):
""" Execute ZREM Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREM', *args, shard_key=args[0])
return self.execute(b'ZREM', *args)
def zremrangebylex(self, *args):
""" Execute ZREMRANGEBYLEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREMRANGEBYLEX', *args, shard_key=args[0])
return self.execute(b'ZREMRANGEBYLEX', *args)
def zremrangebyrank(self, *args):
""" Execute ZREMRANGEBYRANK Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREMRANGEBYRANK', *args, shard_key=args[0])
return self.execute(b'ZREMRANGEBYRANK', *args)
def zremrangebyscore(self, *args):
""" Execute ZREMRANGEBYSCORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREMRANGEBYSCORE', *args, shard_key=args[0])
return self.execute(b'ZREMRANGEBYSCORE', *args)
def zrevrange(self, *args):
""" Execute ZREVRANGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREVRANGE', *args, shard_key=args[0])
return self.execute(b'ZREVRANGE', *args)
def zrevrangebylex(self, *args):
""" Execute ZREVRANGEBYLEX Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREVRANGEBYLEX', *args, shard_key=args[0])
return self.execute(b'ZREVRANGEBYLEX', *args)
def zrevrangebyscore(self, *args):
""" Execute ZREVRANGEBYSCORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREVRANGEBYSCORE', *args, shard_key=args[0])
return self.execute(b'ZREVRANGEBYSCORE', *args)
def zrevrank(self, *args):
""" Execute ZREVRANK Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZREVRANK', *args, shard_key=args[0])
return self.execute(b'ZREVRANK', *args)
def zscore(self, *args):
""" Execute ZSCORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZSCORE', *args, shard_key=args[0])
return self.execute(b'ZSCORE', *args)
def zunionstore(self, *args):
""" Execute ZUNIONSTORE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZUNIONSTORE', *args, shard_key=args[0])
return self.execute(b'ZUNIONSTORE', *args)
def zscan(self, *args):
""" Execute ZSCAN Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'ZSCAN', *args, shard_key=args[0])
return self.execute(b'ZSCAN', *args)
class HyperLogLog(BaseCommand):
def __init__(self):
super().__init__()
def pfadd(self, *args):
""" Execute PFADD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PFADD', *args, shard_key=args[0])
return self.execute(b'PFADD', *args)
def pfcount(self, *args):
""" Execute PFCOUNT Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PFCOUNT', *args, shard_key=args[0])
return self.execute(b'PFCOUNT', *args)
def pfmerge(self, *args):
""" Execute PFMERGE Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'PFMERGE', *args, shard_key=args[0])
return self.execute(b'PFMERGE', *args)
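# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The HyperLogLog mixin wraps Redis's probabilistic cardinality counter: PFADD
# registers elements, PFCOUNT estimates the number of distinct elements seen,
# and PFMERGE unions several counters. With a hypothetical client exposing
# this mixin the flow would be:
#
#   client.pfadd('visitors', 'alice', 'bob', 'alice')
#   client.pfcount('visitors')   # ~2; the estimate carries a small error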
class Publish(BaseCommand):
def __init__(self):
super().__init__()
def publish(self, *args):
""" Execute PUBLISH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
raise NotImplementedError
return self.execute(b'PUBLISH', *args)
class Subscribe(object):
def write(self, *args):
raise NotImplementedError
def psubscribe(self, *args):
""" Execute PSUBSCRIBE Command, consult Redis documentation for details.
:return: result, exception
"""
return self.write(b'PSUBSCRIBE', *args)
def punsubscribe(self, *args):
""" Execute PUNSUBSCRIBE Command, consult Redis documentation for details.
:return: result, exception
"""
return self.write(b'PUNSUBSCRIBE', *args)
def subscribe(self, *args):
""" Execute SUBSCRIBE Command, consult Redis documentation for details.
:return: result, exception
"""
return self.write(b'SUBSCRIBE', *args)
def unsubscribe(self, *args):
""" Execute UNSUBSCRIBE Command, consult Redis documentation for details.
:return: result, exception
"""
return self.write(b'UNSUBSCRIBE', *args)
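# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Subscribe is deliberately not a BaseCommand: pub/sub switches the connection
# into a push mode where messages arrive unsolicited, so it only writes
# commands via self.write() and the owner reads replies from the connection
# separately, e.g.:
#
#   sub.subscribe('news')   # sends SUBSCRIBE news
#   # ...then read published messages from the socket in a loop.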
class Transaction(BaseCommand):
def __init__(self):
super().__init__()
def discard(self, *args, shard_key=None, sock=None):
""" Execute DISCARD Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'DISCARD', *args, shard_key=shard_key, sock=sock)
return self.execute(b'DISCARD', *args)
def exec(self, *args, shard_key=None, sock=None):
""" Execute EXEC Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'EXEC', *args, shard_key=shard_key, sock=sock)
return self.execute(b'EXEC', *args)
def multi(self, *args, shard_key=None, sock=None):
""" Execute MULTI Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'MULTI', *args, shard_key=shard_key, sock=sock)
return self.execute(b'MULTI', *args)
def unwatch(self, *args, shard_key=None, sock=None):
""" Execute UNWATCH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'UNWATCH', *args, shard_key=shard_key, sock=sock)
return self.execute(b'UNWATCH', *args)
def watch(self, *args):
""" Execute WATCH Command, consult Redis documentation for details.
:return: result, exception
"""
if self._cluster:
return self.execute(b'WATCH', *args, shard_key=args[0])
return self.execute(b'WATCH', *args)
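# --- Illustrative sketch (editor's addition, not part of the original file) ---
# The Transaction mixin mirrors Redis MULTI/EXEC: commands issued between
# multi() and exec() are queued and then applied atomically, while watch()
# makes the EXEC abort if a watched key was modified in the meantime. With a
# hypothetical client:
#
#   client.watch('balance')
#   client.multi()
#   client.incrby('balance', 100)
#   client.exec()   # the queued replies, or a null reply if 'balance' changed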
class Scripting(BaseCommand):
def __init__(self):
super().__init__()
def eval(self, *args, shard_key=None, sock=None):
""" Execute EVAL Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'EVAL', *args, shard_key=shard_key, sock=sock)
return self.execute(b'EVAL', *args)
def evalsha(self, *args, shard_key=None, sock=None):
""" Execute EVALSHA Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'EVALSHA', *args, shard_key=shard_key, sock=sock)
return self.execute(b'EVALSHA', *args)
def script_debug(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT DEBUG Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'DEBUG', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'DEBUG', *args)
def script_exists(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT EXISTS Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'EXISTS', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'EXISTS', *args)
def script_flush(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT FLUSH Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'FLUSH', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'FLUSH', *args)
def script_kill(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT KILL Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'KILL', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'KILL', *args)
def script_load(self, *args, shard_key=None, sock=None):
""" Execute SCRIPT LOAD Command, consult Redis documentation for details.
:param shard_key: (optional)
Should be set to the name of the key you want to work with.
Cannot be used if sock is set.
Only used with a Cluster Client.
:type shard_key: string
:param sock: (optional)
The string representation of the socket that the command should be executed against.
For example: "testhost_6379".
Cannot be used if shard_key is set.
Only used with a Cluster Client.
:type sock: string
:return: result, exception
"""
if self._cluster:
return self.execute(b'SCRIPT', b'LOAD', *args, shard_key=shard_key, sock=sock)
return self.execute(b'SCRIPT', b'LOAD', *args)
| 32.758199
| 92
| 0.604252
| 6,012
| 50,939
| 5.041085
| 0.035762
| 0.091728
| 0.153694
| 0.162735
| 0.84657
| 0.845184
| 0.771538
| 0.75768
| 0.742733
| 0.601247
| 0
| 0.004675
| 0.286185
| 50,939
| 1,554
| 93
| 32.779279
| 0.828828
| 0.373054
| 0
| 0.26979
| 0
| 0
| 0.071244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.253635
| false
| 0
| 0
| 0
| 0.725363
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 5959dd25c73172cbbe4312f64657c86f293b080e
| 41,145
| py
| Python
| 46/swagger_client/api/wordnet_controller_api.py
| apitore/apitore-sdk-python
| c0814c5635ddd09e9a20fcb155b62122bee41d33
| ["Apache-2.0"] | 3
| 2018-08-21T06:14:33.000Z
| 2019-10-18T23:05:50.000Z
| 46/swagger_client/api/wordnet_controller_api.py
| apitore/apitore-sdk-python
| c0814c5635ddd09e9a20fcb155b62122bee41d33
| ["Apache-2.0"] | null | null | null
| 46/swagger_client/api/wordnet_controller_api.py
| apitore/apitore-sdk-python
| c0814c5635ddd09e9a20fcb155b62122bee41d33
| ["Apache-2.0"] | null | null | null
|
# coding: utf-8
"""
WordNet APIs
You can access the entire WordNet DB.<BR />[Endpoint] https://api.apitore.com/api/46 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
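# NOTE (editor's addition): this generated client passes `async` as a keyword
# argument; `async` became a reserved word in Python 3.7, so the file as
# written targets Python 2 and Python <= 3.6. Later swagger-codegen releases
# rename the flag to `async_req` for exactly this reason.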
class WordnetControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def sensebysynset_using_get(self, access_token, synset, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Sense object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.sensebysynset_using_get(access_token, synset, async=True)
>>> result = thread.get()
:param async bool
:param str access_token: Access Token (required)
:param str synset: Synset (required)
:param str lang: Language. [jpn:japanese,eng:english]
:return: SenseResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.sensebysynset_using_get_with_http_info(access_token, synset, **kwargs) # noqa: E501
else:
(data) = self.sensebysynset_using_get_with_http_info(access_token, synset, **kwargs) # noqa: E501
return data
def sensebysynset_using_get_with_http_info(self, access_token, synset, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Sense object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.sensebysynset_using_get_with_http_info(access_token, synset, async=True)
>>> result = thread.get()
:param async bool
:param str access_token: Access Token (required)
:param str synset: Synset (required)
:param str lang: Language. [jpn:japanese,eng:english]
:return: SenseResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_token', 'synset', 'lang'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method sensebysynset_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_token' is set
if ('access_token' not in params or
params['access_token'] is None):
raise ValueError("Missing the required parameter `access_token` when calling `sensebysynset_using_get`") # noqa: E501
# verify the required parameter 'synset' is set
if ('synset' not in params or
params['synset'] is None):
raise ValueError("Missing the required parameter `synset` when calling `sensebysynset_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('access_token', params['access_token'])) # noqa: E501
if 'synset' in params:
query_params.append(('synset', params['synset'])) # noqa: E501
if 'lang' in params:
query_params.append(('lang', params['lang'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/wordnet/sense/bysynset', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SenseResponseEntity', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
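# --- Illustrative sketch (editor's addition, not part of the generated file) ---
# Typical use of the wrapper above; the token and synset values are
# hypothetical placeholders:
#
#   api = WordnetControllerApi()
#   entity = api.sensebysynset_using_get('<ACCESS_TOKEN>', '02084442-n')
#
# Passing async=True instead returns a thread whose .get() yields the same
# SenseResponseEntity.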
def sensebywordid_using_get(self, access_token, wordid, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Sense object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.sensebywordid_using_get(access_token, wordid, async=True)
>>> result = thread.get()
:param async bool
:param str access_token: Access Token (required)
:param int wordid: Word ID (required)
:return: SenseResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.sensebywordid_using_get_with_http_info(access_token, wordid, **kwargs) # noqa: E501
else:
(data) = self.sensebywordid_using_get_with_http_info(access_token, wordid, **kwargs) # noqa: E501
return data
def sensebywordid_using_get_with_http_info(self, access_token, wordid, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Sense object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.sensebywordid_using_get_with_http_info(access_token, wordid, async=True)
>>> result = thread.get()
:param async bool
:param str access_token: Access Token (required)
:param int wordid: Word ID (required)
:return: SenseResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_token', 'wordid'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method sensebywordid_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_token' is set
if ('access_token' not in params or
params['access_token'] is None):
raise ValueError("Missing the required parameter `access_token` when calling `sensebywordid_using_get`") # noqa: E501
# verify the required parameter 'wordid' is set
if ('wordid' not in params or
params['wordid'] is None):
raise ValueError("Missing the required parameter `wordid` when calling `sensebywordid_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('access_token', params['access_token'])) # noqa: E501
if 'wordid' in params:
query_params.append(('wordid', params['wordid'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/wordnet/sense/bywordid', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SenseResponseEntity', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def synlinkby_synset_using_get(self, access_token, synset, **kwargs): # noqa: E501
"""WordNet WebAPI. Return SynLink object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.synlinkby_synset_using_get(access_token, synset, async=True)
>>> result = thread.get()
:param async bool
:param str access_token: Access Token (required)
:param str synset: Synset (required)
:param str link: Link. You can specify several links in CSV format. [also, syns, hype, inst, hypo, hasi, mero, mmem, msub, mprt, holo, hmem, hsub, hprt, attr, sim, enta, caus, dmnc, dmnu, dmnr, dmtc, dmtu, dmtr, ants]
:return: SynlinkResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.synlinkby_synset_using_get_with_http_info(access_token, synset, **kwargs) # noqa: E501
else:
(data) = self.synlinkby_synset_using_get_with_http_info(access_token, synset, **kwargs) # noqa: E501
return data
def synlinkby_synset_using_get_with_http_info(self, access_token, synset, **kwargs): # noqa: E501
"""WordNet WebAPI. Return SynLink object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.synlinkby_synset_using_get_with_http_info(access_token, synset, async=True)
>>> result = thread.get()
:param async bool
:param str access_token: Access Token (required)
:param str synset: Synset (required)
:param str link: Link. You can specify several links in CSV format. [also, syns, hype, inst, hypo, hasi, mero, mmem, msub, mprt, holo, hmem, hsub, hprt, attr, sim, enta, caus, dmnc, dmnu, dmnr, dmtc, dmtu, dmtr, ants]
:return: SynlinkResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_token', 'synset', 'link'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method synlinkby_synset_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_token' is set
if ('access_token' not in params or
params['access_token'] is None):
raise ValueError("Missing the required parameter `access_token` when calling `synlinkby_synset_using_get`") # noqa: E501
# verify the required parameter 'synset' is set
if ('synset' not in params or
params['synset'] is None):
raise ValueError("Missing the required parameter `synset` when calling `synlinkby_synset_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('access_token', params['access_token'])) # noqa: E501
if 'synset' in params:
query_params.append(('synset', params['synset'])) # noqa: E501
if 'link' in params:
query_params.append(('link', params['link'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/wordnet/synlink/bysynset', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SynlinkResponseEntity', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def synsetby_name_using_get(self, access_token, name, pos, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Synset object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.synsetby_name_using_get(access_token, name, pos, async=True)
>>> result = thread.get()
:param async bool
:param str access_token: Access Token (required)
:param str name: Name (required)
:param str pos: Part-of-speech. [n:noun,v:verb,a:adjective,r:adverb] (required)
:return: SynsetResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.synsetby_name_using_get_with_http_info(access_token, name, pos, **kwargs) # noqa: E501
else:
(data) = self.synsetby_name_using_get_with_http_info(access_token, name, pos, **kwargs) # noqa: E501
return data
def synsetby_name_using_get_with_http_info(self, access_token, name, pos, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Synset object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.synsetby_name_using_get_with_http_info(access_token, name, pos, async=True)
>>> result = thread.get()
:param async bool
:param str access_token: Access Token (required)
:param str name: Name (required)
:param str pos: Part-of-speech. [n:noun,v:verb,a:adjective,r:adverb] (required)
:return: SynsetResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_token', 'name', 'pos'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method synsetby_name_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_token' is set
if ('access_token' not in params or
params['access_token'] is None):
raise ValueError("Missing the required parameter `access_token` when calling `synsetby_name_using_get`") # noqa: E501
# verify the required parameter 'name' is set
if ('name' not in params or
params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `synsetby_name_using_get`") # noqa: E501
# verify the required parameter 'pos' is set
if ('pos' not in params or
params['pos'] is None):
raise ValueError("Missing the required parameter `pos` when calling `synsetby_name_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('access_token', params['access_token'])) # noqa: E501
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'pos' in params:
query_params.append(('pos', params['pos'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/wordnet/synset/byname', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SynsetResponseEntity', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def synsetbysynset_using_get(self, access_token, synset, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Synset object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.synsetbysynset_using_get(access_token, synset, async_req=True)
        >>> result = thread.get()
        :param async_req bool
:param str access_token: Access Token (required)
:param str synset: Synset (required)
:return: SynsetResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.synsetbysynset_using_get_with_http_info(access_token, synset, **kwargs) # noqa: E501
        else:
            data = self.synsetbysynset_using_get_with_http_info(access_token, synset, **kwargs) # noqa: E501
            return data
def synsetbysynset_using_get_with_http_info(self, access_token, synset, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Synset object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.synsetbysynset_using_get_with_http_info(access_token, synset, async_req=True)
        >>> result = thread.get()
        :param async_req bool
:param str access_token: Access Token (required)
:param str synset: Synset (required)
:return: SynsetResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_token', 'synset'] # noqa: E501
        all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method synsetbysynset_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_token' is set
if ('access_token' not in params or
params['access_token'] is None):
raise ValueError("Missing the required parameter `access_token` when calling `synsetbysynset_using_get`") # noqa: E501
# verify the required parameter 'synset' is set
if ('synset' not in params or
params['synset'] is None):
raise ValueError("Missing the required parameter `synset` when calling `synsetbysynset_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('access_token', params['access_token'])) # noqa: E501
if 'synset' in params:
query_params.append(('synset', params['synset'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/wordnet/synset/bysynset', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SynsetResponseEntity', # noqa: E501
auth_settings=auth_settings,
            async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
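    # --- Sketch of the raw-response path (illustrative): calling the
    # `_with_http_info` variant directly, without `_return_http_data_only`,
    # makes swagger-style clients hand back a (data, status, headers) tuple;
    # the synset id below is a hypothetical value:
    #
    #     data, status, headers = api.synsetbysynset_using_get_with_http_info(
    #         token, '02084071-n', _return_http_data_only=False)
    #     assert status == 200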
def synsetdefbysynset_using_get(self, access_token, synset, lang, **kwargs): # noqa: E501
"""WordNet WebAPI. Return SynsetDef object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.synsetdefbysynset_using_get(access_token, synset, lang, async_req=True)
        >>> result = thread.get()
        :param async_req bool
:param str access_token: Access Token (required)
:param str synset: Synset (required)
:param str lang: Language. [jpn:japanese,eng:english] (required)
:return: SynsetDefResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.synsetdefbysynset_using_get_with_http_info(access_token, synset, lang, **kwargs) # noqa: E501
        else:
            data = self.synsetdefbysynset_using_get_with_http_info(access_token, synset, lang, **kwargs) # noqa: E501
            return data
def synsetdefbysynset_using_get_with_http_info(self, access_token, synset, lang, **kwargs): # noqa: E501
"""WordNet WebAPI. Return SynsetDef object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.synsetdefbysynset_using_get_with_http_info(access_token, synset, lang, async_req=True)
        >>> result = thread.get()
        :param async_req bool
:param str access_token: Access Token (required)
:param str synset: Synset (required)
:param str lang: Language. [jpn:japanese,eng:english] (required)
:return: SynsetDefResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_token', 'synset', 'lang'] # noqa: E501
        all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method synsetdefbysynset_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_token' is set
if ('access_token' not in params or
params['access_token'] is None):
raise ValueError("Missing the required parameter `access_token` when calling `synsetdefbysynset_using_get`") # noqa: E501
# verify the required parameter 'synset' is set
if ('synset' not in params or
params['synset'] is None):
raise ValueError("Missing the required parameter `synset` when calling `synsetdefbysynset_using_get`") # noqa: E501
# verify the required parameter 'lang' is set
if ('lang' not in params or
params['lang'] is None):
raise ValueError("Missing the required parameter `lang` when calling `synsetdefbysynset_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('access_token', params['access_token'])) # noqa: E501
if 'synset' in params:
query_params.append(('synset', params['synset'])) # noqa: E501
if 'lang' in params:
query_params.append(('lang', params['lang'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/wordnet/synsetdef/bysynset', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SynsetDefResponseEntity', # noqa: E501
auth_settings=auth_settings,
            async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
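    # --- Sketch of the transport knobs validated above (illustrative values):
    # `_request_timeout` caps the underlying HTTP call, and
    # `_preload_content=False` would return the raw response object instead
    # of a deserialized entity:
    #
    #     defs = api.synsetdefbysynset_using_get(token, '02084071-n', 'jpn',
    #                                            _request_timeout=5)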
def wordbyid_using_get(self, access_token, wordid, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Word object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.wordbyid_using_get(access_token, wordid, async_req=True)
        >>> result = thread.get()
        :param async_req bool
:param str access_token: Access Token (required)
:param int wordid: Word ID (required)
:return: WordResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.wordbyid_using_get_with_http_info(access_token, wordid, **kwargs) # noqa: E501
        else:
            data = self.wordbyid_using_get_with_http_info(access_token, wordid, **kwargs) # noqa: E501
            return data
def wordbyid_using_get_with_http_info(self, access_token, wordid, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Word object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.wordbyid_using_get_with_http_info(access_token, wordid, async_req=True)
        >>> result = thread.get()
        :param async_req bool
:param str access_token: Access Token (required)
:param int wordid: Word ID (required)
:return: WordResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_token', 'wordid'] # noqa: E501
        all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method wordbyid_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_token' is set
if ('access_token' not in params or
params['access_token'] is None):
raise ValueError("Missing the required parameter `access_token` when calling `wordbyid_using_get`") # noqa: E501
# verify the required parameter 'wordid' is set
if ('wordid' not in params or
params['wordid'] is None):
raise ValueError("Missing the required parameter `wordid` when calling `wordbyid_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('access_token', params['access_token'])) # noqa: E501
if 'wordid' in params:
query_params.append(('wordid', params['wordid'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/wordnet/word/bywordid', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WordResponseEntity', # noqa: E501
auth_settings=auth_settings,
            async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
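    # --- What the guards above mean for callers (illustrative):
    #
    #     api.wordbyid_using_get(token, None)       # ValueError: missing `wordid`
    #     api.wordbyid_using_get(token, 1, foo=1)   # TypeError: unexpected keyword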
def wordbylemma_using_get(self, access_token, lemma, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Word object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.wordbylemma_using_get(access_token, lemma, async_req=True)
        >>> result = thread.get()
        :param async_req bool
:param str access_token: Access Token (required)
:param str lemma: Lemma (required)
:param str pos: Part-of-speech. You can specify several pos by csv format. [n:noun,v:verb,a:adjective,r:adverb]
:return: WordResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.wordbylemma_using_get_with_http_info(access_token, lemma, **kwargs) # noqa: E501
        else:
            data = self.wordbylemma_using_get_with_http_info(access_token, lemma, **kwargs) # noqa: E501
            return data
def wordbylemma_using_get_with_http_info(self, access_token, lemma, **kwargs): # noqa: E501
"""WordNet WebAPI. Return Word object. # noqa: E501
Japanese WordNet.<BR />Response<BR /> Github: <a href=\"https://github.com/keigohtr/apitore-response-parent/tree/master/wordnet-response\">wordnet-response</a><BR /> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.wordbylemma_using_get_with_http_info(access_token, lemma, async_req=True)
        >>> result = thread.get()
        :param async_req bool
:param str access_token: Access Token (required)
:param str lemma: Lemma (required)
:param str pos: Part-of-speech. You can specify several pos by csv format. [n:noun,v:verb,a:adjective,r:adverb]
:return: WordResponseEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['access_token', 'lemma', 'pos'] # noqa: E501
        all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method wordbylemma_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'access_token' is set
if ('access_token' not in params or
params['access_token'] is None):
raise ValueError("Missing the required parameter `access_token` when calling `wordbylemma_using_get`") # noqa: E501
# verify the required parameter 'lemma' is set
if ('lemma' not in params or
params['lemma'] is None):
raise ValueError("Missing the required parameter `lemma` when calling `wordbylemma_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'access_token' in params:
query_params.append(('access_token', params['access_token'])) # noqa: E501
if 'lemma' in params:
query_params.append(('lemma', params['lemma'])) # noqa: E501
if 'pos' in params:
query_params.append(('pos', params['pos'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/wordnet/word/bylemma', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WordResponseEntity', # noqa: E501
auth_settings=auth_settings,
            async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
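    # --- Sketch of the one optional parameter in this module (illustrative):
    # `pos` is allowed through `all_params` above and, per the docstring,
    # takes comma-separated tags:
    #
    #     words = api.wordbylemma_using_get(token, 'run', pos='n,v')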
| 44.771491
| 224
| 0.623211
| 4,742
| 41,145
| 5.204133
| 0.048503
| 0.049275
| 0.025488
| 0.023341
| 0.964705
| 0.956844
| 0.950887
| 0.940352
| 0.927101
| 0.925804
| 0
| 0.015635
| 0.274055
| 41,145
| 918
| 225
| 44.820261
| 0.810573
| 0.068368
| 0
| 0.792757
| 1
| 0
| 0.207931
| 0.055898
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.008048
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 59a604738db2ba7aa7c659b090e7b7bfdaeb3dad
| 1,097
| py
| Python
| tests/test_module_level_fixtures.py
| ubaumann/nuts
| a2a70cc56be8cf85da6c6b3842c846ee9bbe1e9f
| ["MIT"]
| 14
| 2021-04-28T09:38:12.000Z
| 2021-11-01T20:48:27.000Z
| tests/test_module_level_fixtures.py
| ubaumann/nuts
| a2a70cc56be8cf85da6c6b3842c846ee9bbe1e9f
| ["MIT"]
| 32
| 2021-07-12T10:02:06.000Z
| 2022-01-21T09:43:28.000Z
| tests/test_module_level_fixtures.py
| ubaumann/nuts
| a2a70cc56be8cf85da6c6b3842c846ee9bbe1e9f
| ["MIT"]
| 1
| 2021-07-26T22:08:46.000Z
| 2021-07-26T22:08:46.000Z
|
from tests.utils import YAML_EXTENSION
def test_load_module_fixture(pytester):
arguments = {
"test_module_level_fixtures": """
---
- test_module: tests.base_tests.module_level_fixtures
test_class: TestModuleLevelFixture
test_data: []
"""
}
pytester.makefile(YAML_EXTENSION, **arguments)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
def test_load_module_fixture_multiple_test_definitions(pytester):
arguments = {
"test_module_level_fixtures": """
---
- test_module: tests.base_tests.module_level_fixtures
test_class: TestModuleLevelFixture
test_data: []
""",
"test_module_level_fixtures2": """
---
- test_module: tests.base_tests.module_level_fixtures
test_class: TestModuleLevelFixture
test_data: []
""",
}
pytester.makefile(YAML_EXTENSION, **arguments)
result = pytester.runpytest()
result.assert_outcomes(passed=2)
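# Note on the fixtures above (explanatory sketch, not part of the original
# module): pytester.makefile(ext, **kwargs) is stock pytest API, writing one
# "<keyword><ext>" file per keyword into the pytester tmp dir. The second
# test therefore materialises two YAML files, nuts collects one passing test
# from each, and assert_outcomes(passed=2) checks exactly that. The value of
# YAML_EXTENSION lives in tests.utils; ".yaml" is an assumption here.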
| 28.868421
| 65
| 0.611668
| 100
| 1,097
| 6.3
| 0.3
| 0.095238
| 0.150794
| 0.18254
| 0.869841
| 0.793651
| 0.793651
| 0.793651
| 0.793651
| 0.793651
| 0
| 0.003876
| 0.294439
| 1,097
| 37
| 66
| 29.648649
| 0.810078
| 0
| 0
| 0.709677
| 0
| 0
| 0.542388
| 0.236098
| 0
| 0
| 0
| 0
| 0.064516
| 1
| 0.064516
| false
| 0.064516
| 0.032258
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 9
| 59e2c601531d026277afbb6e8cc64466b1e73740
| 497
| py
| Python
| geometry.py
| catzilla-007/fractal-overload
| 5d173124aaefdb84539248e22275fd2ee39522d8
| ["MIT"]
| null
| null
| null
| geometry.py
| catzilla-007/fractal-overload
| 5d173124aaefdb84539248e22275fd2ee39522d8
| ["MIT"]
| null
| null
| null
| geometry.py
| catzilla-007/fractal-overload
| 5d173124aaefdb84539248e22275fd2ee39522d8
| ["MIT"]
| null
| null
| null
|
def rotate_left(point_a, point_b, sin, cos):
    # Rotate point_b counter-clockwise about point_a by the angle whose
    # sine and cosine are supplied: p' = a + R(theta) * (b - a), expanded.
    x1, y1 = point_a
    x2, y2 = point_b
    x_rot = (x2 * cos) - (y2 * sin) + (x1 * (1 - cos)) + (y1 * sin)
    y_rot = (x2 * sin) + (y2 * cos) + (y1 * (1 - cos)) - (x1 * sin)
    return x_rot, y_rot
def rotate_right(point_a, point_b, sin, cos):
    # Clockwise rotation of point_b about point_a: rotate_left with the
    # sign of sin flipped.
    x1, y1 = point_a
    x2, y2 = point_b
    x_rot = (x2 * cos) + (y2 * sin) + (x1 * (1 - cos)) - (y1 * sin)
    y_rot = -(x2 * sin) + (y2 * cos) + (y1 * (1 - cos)) + (x1 * sin)
    return x_rot, y_rot
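# A quick numerical check (illustrative; rotating (1, 0) about the origin
# by 90 degrees should land near (0, 1) going left and (0, -1) going right):
if __name__ == "__main__":
    import math
    theta = math.pi / 2
    print(rotate_left((0, 0), (1, 0), math.sin(theta), math.cos(theta)))   # ~(0.0, 1.0)
    print(rotate_right((0, 0), (1, 0), math.sin(theta), math.cos(theta)))  # ~(0.0, -1.0)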
| 33.133333
| 67
| 0.501006
| 88
| 497
| 2.625
| 0.204545
| 0.103896
| 0.095238
| 0.103896
| 0.883117
| 0.883117
| 0.883117
| 0.883117
| 0.883117
| 0.883117
| 0
| 0.080692
| 0.301811
| 497
| 14
| 68
| 35.5
| 0.585014
| 0
| 0
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 59e5cfeefe9d32252c2b79c87f8d8caaa8b38e40
| 70
| py
| Python
| gs_pipe/mods/example.py
| yowenter/gs-pipe
| 2f79c186fe1305a6dcab2859d54f24751d6368f9
| ["Apache-2.0"]
| 2
| 2018-04-01T07:20:29.000Z
| 2018-11-29T01:41:05.000Z
| gs_pipe/mods/example.py
| yowenter/gs-pipe
| 2f79c186fe1305a6dcab2859d54f24751d6368f9
| ["Apache-2.0"]
| null
| null
| null
| gs_pipe/mods/example.py
| yowenter/gs-pipe
| 2f79c186fe1305a6dcab2859d54f24751d6368f9
| ["Apache-2.0"]
| null
| null
| null
|
def square(x):
return x*x
def minus_one(x):
return x - 1
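# Illustrative composition of the two mods above. How gs-pipe chains mods at
# runtime is an assumption; the arithmetic itself is exact:
if __name__ == "__main__":
    assert minus_one(square(3)) == 8  # 3*3 = 9, then 9 - 1 = 8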
| 7
| 17
| 0.557143
| 13
| 70
| 2.923077
| 0.538462
| 0.368421
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.328571
| 70
| 9
| 18
| 7.777778
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 7
| 59e851858cc4b13ec79ed1505dd70052e6bd5cac
| 48
| py
| Python
| flask_tests/pure/__init__.py
| fp12/flask-tests
| b14913ea4d56ff6429df34a08f23ca802f52d01f
| ["MIT"]
| null
| null
| null
| flask_tests/pure/__init__.py
| fp12/flask-tests
| b14913ea4d56ff6429df34a08f23ca802f52d01f
| ["MIT"]
| null
| null
| null
| flask_tests/pure/__init__.py
| fp12/flask-tests
| b14913ea4d56ff6429df34a08f23ca802f52d01f
| ["MIT"]
| null
| null
| null
|
from .pure_demo import blueprint, setup # noqa
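# Typical consumption of this package-level re-export (the app wiring below
# is an assumption; only `blueprint` and `setup` are provided by this file):
#
#     from flask import Flask
#     from flask_tests.pure import blueprint, setup
#
#     app = Flask(__name__)
#     app.register_blueprint(blueprint)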
| 24
| 47
| 0.770833
| 7
| 48
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 48
| 1
| 48
| 48
| 0.9
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 7
|