hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6c7400b8ac66e43e353498c9ff0117c041f10263
| 4,729
|
py
|
Python
|
src/generate_random_graph.py
|
suning-opensource/frustrated-random-walk
|
7de559c20e96567a61853668f36d786b126ed57f
|
[
"Apache-2.0"
] | 18
|
2020-12-24T04:26:21.000Z
|
2022-03-24T07:32:39.000Z
|
src/generate_random_graph.py
|
kiminh/frustrated-random-walk
|
7de559c20e96567a61853668f36d786b126ed57f
|
[
"Apache-2.0"
] | 1
|
2021-05-07T06:47:34.000Z
|
2021-05-07T06:47:34.000Z
|
src/generate_random_graph.py
|
kiminh/frustrated-random-walk
|
7de559c20e96567a61853668f36d786b126ed57f
|
[
"Apache-2.0"
] | 8
|
2020-10-22T23:51:55.000Z
|
2021-08-24T06:36:19.000Z
|
import os
import numpy as np
def generate_random_graph(n, p, random_graph_file_name):
assert(n > 1)
assert(p > 0 and p <= 1)
edges = []
for i in range(1, n+1):
for j in range(1, n+1):
random = np.random.uniform(0, 1)
if (random < p):
edge = str(i) + ";" + str(j)
edges.append(edge)
edges = list(set(edges))
writer = open(random_graph_file_name, "w")
for edge in edges:
writer.write(edge + "\n")
writer.close()
def generate_undirected_random_graph(n, p, random_graph_file_name):
assert(n > 1)
assert(p > 0 and p <= 1)
edges = []
for i in range(1, n + 1):
for j in range(i + 1, n + 1):
random = np.random.uniform(0, 1)
if (random < p):
edge = str(i) + ";" + str(j)
edges.append(edge)
edges = list(set(edges))
writer = open(random_graph_file_name, "w")
for edge in edges:
writer.write(edge + "\n")
writer.close()
def generate_random_communities(community_vertex_number, community_number, pin, pout, community_file_name):
assert(pin >= 0 and pin <= 1)
assert(pout >= 0 and pout <= 1)
all_vertices = []
counter = 0
for i in range(community_number):
vertices = []
for j in range(community_vertex_number):
counter += 1
vertices.append(counter)
all_vertices.append(vertices)
edges = []
for i in range(len(all_vertices)):
vertices = all_vertices[i]
for j in range(len(vertices)):
for k in range(len(vertices)):
if j != k:
r = np.random.uniform()
if r < pin:
edges.append(str(vertices[j]) + ";" + str(vertices[k]))
for i in range(len(all_vertices)):
for j in range(i+1, len(all_vertices)):
left_vertices = all_vertices[i]
right_vertices = all_vertices[j]
for k in range(len(left_vertices)):
for l in range(len(right_vertices)):
r = np.random.uniform()
if (r < pout):
edges.append(str(left_vertices[k]) + ";" + str(right_vertices[l]))
edges = list(set(edges))
writer = open(community_file_name, "w")
for edge in edges:
writer.write(edge + "\n")
writer.close()
def generate_undirected_random_communities(community_vertex_number, community_number, pin, pout, community_file_name):
assert(pin >= 0 and pin <= 1)
assert(pout >= 0 and pout <= 1)
all_vertices = []
counter = 0
for i in range(community_number):
vertices = []
for j in range(community_vertex_number):
counter += 1
vertices.append(counter)
all_vertices.append(vertices)
edges = []
for i in range(len(all_vertices)):
vertices = all_vertices[i]
for j in range(len(vertices)):
for k in range(j + 1, len(vertices)):
if j != k:
r = np.random.uniform()
if r < pin:
edge = str(vertices[j]) + ";" + str(vertices[k])
edges.append(edge)
for i in range(len(all_vertices)):
for j in range(i+1, len(all_vertices)):
left_vertices = all_vertices[i]
right_vertices = all_vertices[j]
for k in range(len(left_vertices)):
for l in range(k + 1, len(right_vertices)):
r = np.random.uniform()
if (r < pout):
edge = str(left_vertices[k]) + ";" + str(right_vertices[l])
edges.append(edge)
edges = list(set(edges))
writer = open(community_file_name, "w")
for edge in edges:
writer.write(edge + "\n")
writer.close()
def main_community():
import sys
if (len(sys.argv) != 5):
print "community_vertex_number = sys.argv[1], community_number = sys.argv[2], pin = sys.argv[3], pout = sys.argv[4]. "
return -1
community_vertex_number = int(sys.argv[1])
community_number = int(sys.argv[2])
pin = float(sys.argv[3])
pout = float(sys.argv[4])
generate_undirected_random_communities(community_vertex_number, community_number, pin, pout, "random_communities.csv")
return 0
def main_graph():
import sys
if (len(sys.argv) != 3):
print "n = sys.argv[1], p = sys.argv[2]. "
return -1
n = int(sys.argv[1])
p = float(sys.argv[2])
generate_undirected_random_graph(n, p, "random_graph.csv")
return 0
if __name__ == "__main__":
import sys
sys.exit(main_community())
| 33.302817
| 126
| 0.550222
| 627
| 4,729
| 3.99362
| 0.102073
| 0.061502
| 0.039936
| 0.035144
| 0.850639
| 0.832268
| 0.795927
| 0.795927
| 0.779153
| 0.746805
| 0
| 0.016275
| 0.324381
| 4,729
| 141
| 127
| 33.539007
| 0.767449
| 0
| 0
| 0.744
| 1
| 0.008
| 0.043984
| 0.009516
| 0
| 0
| 0
| 0
| 0.064
| 0
| null | null | 0
| 0.04
| null | null | 0.016
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
669d3a51ce7f6a9762780cc2bac4f7e27335f1f0
| 229
|
py
|
Python
|
src/dbxdeploy/poetry/PoetryPathResolver.py
|
Kukuksumusu/dbx-deploy
|
b46b4cbe1719fd337137880cfc99e818468184f5
|
[
"MIT"
] | 2
|
2021-02-04T09:36:42.000Z
|
2021-02-24T10:07:13.000Z
|
src/dbxdeploy/poetry/PoetryPathResolver.py
|
Kukuksumusu/dbx-deploy
|
b46b4cbe1719fd337137880cfc99e818468184f5
|
[
"MIT"
] | 7
|
2021-04-30T07:20:15.000Z
|
2022-01-03T10:21:52.000Z
|
src/dbxdeploy/poetry/PoetryPathResolver.py
|
Kukuksumusu/dbx-deploy
|
b46b4cbe1719fd337137880cfc99e818468184f5
|
[
"MIT"
] | 3
|
2020-08-27T10:56:16.000Z
|
2021-02-17T07:26:09.000Z
|
from pathlib import Path
class PoetryPathResolver:
def __init__(self, poetry_path: str):
self.__poetry_path = poetry_path
def get_poetry_path(self) -> Path:
return Path(self.__poetry_path).expanduser()
| 22.9
| 52
| 0.716157
| 29
| 229
| 5.172414
| 0.482759
| 0.333333
| 0.28
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.200873
| 229
| 9
| 53
| 25.444444
| 0.819672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
66ac73c49775b320602f6ec15252471d19a06531
| 2,814
|
py
|
Python
|
tests/tests.py
|
wblazej/pyfract
|
3695b482189e90af66cec6672f672c498268b797
|
[
"MIT"
] | 1
|
2021-12-21T15:37:27.000Z
|
2021-12-21T15:37:27.000Z
|
tests/tests.py
|
wblazej/pyfract
|
3695b482189e90af66cec6672f672c498268b797
|
[
"MIT"
] | 1
|
2021-12-21T15:36:48.000Z
|
2022-02-25T21:43:07.000Z
|
tests/tests.py
|
wblazej/pyfract
|
3695b482189e90af66cec6672f672c498268b797
|
[
"MIT"
] | null | null | null |
import unittest
from pyfract.fraction import Fraction
class FractionTests(unittest.TestCase):
def setUp(self) -> None:
self.f1 = Fraction(1, 2)
self.f2 = Fraction(1, 4)
def test_addition(self):
self.assertEqual(self.f1 + self.f2, Fraction(3, 4))
self.assertEqual(self.f1 + 2, Fraction(5, 2))
def test_subtraction(self):
self.assertEqual(self.f1 - self.f2, Fraction(1, 4))
self.assertEqual(self.f1 - 2, Fraction(-3, 2))
def test_multiplication(self):
self.assertEqual(self.f1 * self.f2, Fraction(1, 8))
self.assertEqual(self.f1 * 2, Fraction(1, 1))
def test_division(self):
self.assertEqual(self.f1 / self.f2, Fraction(2, 1))
self.assertEqual(self.f1 / 2, Fraction(1, 4))
def test_less_than(self):
self.assertEqual(self.f1 < self.f2, False)
self.assertEqual(self.f1 < 2, True)
self.assertEqual(self.f1 < 0.5, False)
def test_less_or_equal(self):
self.assertEqual(self.f1 <= self.f2, False)
self.assertEqual(self.f1 <= 2, True)
self.assertEqual(self.f1 <= 0.5, True)
def test_greater_than(self):
self.assertEqual(self.f1 > self.f2, True)
self.assertEqual(self.f1 > 2, False)
self.assertEqual(self.f1 > 0.5, False)
def test_greater_or_equal(self):
self.assertEqual(self.f1 >= self.f2, True)
self.assertEqual(self.f1 >= 2, False)
self.assertEqual(self.f1 >= 0.5, True)
def test_equal(self):
self.assertEqual(self.f1 == self.f2, False)
self.assertEqual(self.f1 == self.f1, True)
self.assertEqual(self.f1 == 0.5, True)
def test_not_equal(self):
self.assertEqual(self.f1 != self.f2, True)
self.assertEqual(self.f1 != self.f1, False)
self.assertEqual(self.f1 != 0.5, False)
def test_from_float(self):
testcases = [[1, 3], [18, 29], [6, 10], [24, 11], [192, 3920], [3901, 890934], [190383, 1093293]]
for testcase in testcases:
x = testcase[0]
y = testcase[1]
f = Fraction.from_float(x / y)
self.assertEqual(f, Fraction(x, y))
def test_from_float_accurately(self):
testcases = [[1, 3], [18, 29], [6, 10], [24, 11], [192, 3920], [3901, 890934], [190383, 1093293]]
for testcase in testcases:
x = testcase[0]
y = testcase[1]
f = Fraction.from_float_accurately(x / y, accuracy=12)
self.assertEqual(f, Fraction(x, y))
def test_to_float(self):
x = self.f1.to_float()
self.assertEqual(x, 0.5)
self.assertEqual(type(x), float)
x = self.f2.to_float()
self.assertEqual(x, 0.25)
self.assertEqual(type(x), float)
if __name__ == "__main__":
unittest.main()
| 32.72093
| 105
| 0.596304
| 390
| 2,814
| 4.207692
| 0.161538
| 0.292505
| 0.301036
| 0.332724
| 0.810481
| 0.761121
| 0.724558
| 0.648995
| 0.556368
| 0.491164
| 0
| 0.085728
| 0.257996
| 2,814
| 85
| 106
| 33.105882
| 0.700192
| 0
| 0
| 0.184615
| 0
| 0
| 0.002843
| 0
| 0
| 0
| 0
| 0
| 0.492308
| 1
| 0.215385
| false
| 0
| 0.030769
| 0
| 0.261538
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
dd2b33a16367fbe77f23b736d0b7ed93ffe6b5ff
| 10,050
|
py
|
Python
|
models/stclassifier.py
|
jnyborg/timematch
|
a652df95282de9a3fc12d2fd204f438ff4ccb122
|
[
"MIT"
] | 2
|
2022-03-22T08:18:08.000Z
|
2022-03-29T10:31:18.000Z
|
models/stclassifier.py
|
jnyborg/timematch
|
a652df95282de9a3fc12d2fd204f438ff4ccb122
|
[
"MIT"
] | null | null | null |
models/stclassifier.py
|
jnyborg/timematch
|
a652df95282de9a3fc12d2fd204f438ff4ccb122
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
import torch.nn as nn
from models.competings import GRU, TempConv
from models.decoder import get_decoder
from models.ltae import LTAE
from models.pse import PixelSetEncoder
from models.tae import TemporalAttentionEncoder
class PseLTae(nn.Module):
"""
Pixel-Set encoder + Lightweight Temporal Attention Encoder sequence classifier
"""
def __init__(
self,
input_dim=10,
mlp1=[10, 32, 64],
pooling="mean_std",
mlp2=[128, 128],
with_extra=True,
extra_size=4,
n_head=16,
d_k=8,
d_model=256,
mlp3=[256, 128],
dropout=0.2,
T=1000,
mlp4=[128, 64, 32],
num_classes=20,
max_temporal_shift=100,
):
super(PseLTae, self).__init__()
if with_extra:
mlp2 = deepcopy(mlp2)
mlp2[0] += extra_size
self.spatial_encoder = PixelSetEncoder(
input_dim,
mlp1=mlp1,
pooling=pooling,
mlp2=mlp2,
with_extra=with_extra,
extra_size=extra_size,
)
self.temporal_encoder = LTAE(
in_channels=mlp2[-1],
n_head=n_head,
d_k=d_k,
d_model=d_model,
n_neurons=mlp3,
dropout=dropout,
T=T,
max_temporal_shift=max_temporal_shift,
)
self.decoder = get_decoder(mlp4, num_classes)
def forward(self, pixels, mask, positions, extra, return_feats=False):
"""
Args:
input(tuple): (Pixel-Set, Pixel-Mask) or ((Pixel-Set, Pixel-Mask), Extra-features)
Pixel-Set : Batch_size x Sequence length x Channel x Number of pixels
Pixel-Mask : Batch_size x Sequence length x Number of pixels
Positions : Batch_size x Sequence length
Extra-features : Batch_size x Sequence length x Number of features
"""
spatial_feats = self.spatial_encoder(pixels, mask, extra)
temporal_feats = self.temporal_encoder(spatial_feats, positions)
logits = self.decoder(temporal_feats)
if return_feats:
return logits, temporal_feats
else:
return logits
def param_ratio(self):
total = get_ntrainparams(self)
s = get_ntrainparams(self.spatial_encoder)
t = get_ntrainparams(self.temporal_encoder)
c = get_ntrainparams(self.decoder)
print("TOTAL TRAINABLE PARAMETERS : {}".format(total))
print(
"RATIOS: Spatial {:5.1f}% , Temporal {:5.1f}% , Classifier {:5.1f}%".format(
s / total * 100, t / total * 100, c / total * 100
)
)
return total
class PseTae(nn.Module):
"""
Pixel-Set encoder + Temporal Attention Encoder sequence classifier
"""
def __init__(
self,
input_dim=10,
mlp1=[10, 32, 64],
pooling="mean_std",
mlp2=[128, 128],
with_extra=True,
extra_size=4,
n_head=4,
d_k=32,
d_model=None,
mlp3=[512, 128, 128],
dropout=0.2,
T=1000,
mlp4=[128, 64, 32],
num_classes=20,
max_temporal_shift=100,
max_position=365,
):
super(PseTae, self).__init__()
if with_extra:
mlp2 = deepcopy(mlp2)
mlp2[0] += 4
self.spatial_encoder = PixelSetEncoder(
input_dim,
mlp1=mlp1,
pooling=pooling,
mlp2=mlp2,
with_extra=with_extra,
extra_size=extra_size,
)
self.temporal_encoder = TemporalAttentionEncoder(
in_channels=mlp2[-1],
n_head=n_head,
d_k=d_k,
d_model=d_model,
n_neurons=mlp3,
dropout=dropout,
T=T,
max_position=max_position,
max_temporal_shift=max_temporal_shift,
)
self.decoder = get_decoder(mlp4, num_classes)
def forward(self, pixels, mask, positions, extra, return_feats=False):
"""
Args:
input(tuple): (Pixel-Set, Pixel-Mask) or ((Pixel-Set, Pixel-Mask), Extra-features)
Pixel-Set : Batch_size x Sequence length x Channel x Number of pixels
Pixel-Mask : Batch_size x Sequence length x Number of pixels
Positions : Batch_size x Sequence length
Extra-features : Batch_size x Sequence length x Number of features
"""
spatial_feats = self.spatial_encoder(pixels, mask, extra)
temporal_feats = self.temporal_encoder(spatial_feats, positions)
logits = self.decoder(temporal_feats)
if return_feats:
return logits, temporal_feats
else:
return logits
def param_ratio(self):
total = get_ntrainparams(self)
s = get_ntrainparams(self.spatial_encoder)
t = get_ntrainparams(self.temporal_encoder)
c = get_ntrainparams(self.decoder)
print("TOTAL TRAINABLE PARAMETERS : {}".format(total))
print(
"RATIOS: Spatial {:5.1f}% , Temporal {:5.1f}% , Classifier {:5.1f}%".format(
s / total * 100, t / total * 100, c / total * 100
)
)
return total
class PseGru(nn.Module):
"""
Pixel-Set encoder + GRU
"""
def __init__(
self,
input_dim=10,
mlp1=[10, 32, 64],
pooling="mean_std",
mlp2=[128, 128],
with_extra=True,
extra_size=4,
hidden_dim=128,
mlp4=[128, 64, 32],
num_classes=20,
max_temporal_shift=100,
max_position=365,
):
super(PseGru, self).__init__()
if with_extra:
mlp2 = deepcopy(mlp2)
mlp2[0] += 4
self.spatial_encoder = PixelSetEncoder(
input_dim,
mlp1=mlp1,
pooling=pooling,
mlp2=mlp2,
with_extra=with_extra,
extra_size=extra_size,
)
self.temporal_encoder = GRU(
in_channels=mlp2[-1],
hidden_dim=hidden_dim,
max_position=max_position,
max_temporal_shift=max_temporal_shift,
)
self.decoder = get_decoder(mlp4, num_classes)
def forward(self, pixels, mask, positions, extra, return_feats=False):
"""
Args:
input(tuple): (Pixel-Set, Pixel-Mask) or ((Pixel-Set, Pixel-Mask), Extra-features)
Pixel-Set : Batch_size x Sequence length x Channel x Number of pixels
Pixel-Mask : Batch_size x Sequence length x Number of pixels
Positions : Batch_size x Sequence length
Extra-features : Batch_size x Sequence length x Number of features
"""
spatial_feats = self.spatial_encoder(pixels, mask, extra)
temporal_feats = self.temporal_encoder(spatial_feats, positions)
logits = self.decoder(temporal_feats)
if return_feats:
return logits, temporal_feats
else:
return logits
def param_ratio(self):
total = get_ntrainparams(self)
s = get_ntrainparams(self.spatial_encoder)
t = get_ntrainparams(self.temporal_encoder)
c = get_ntrainparams(self.decoder)
print("TOTAL TRAINABLE PARAMETERS : {}".format(total))
print(
"RATIOS: Spatial {:5.1f}% , Temporal {:5.1f}% , Classifier {:5.1f}%".format(
s / total * 100, t / total * 100, c / total * 100
)
)
return total
class PseTempCNN(nn.Module):
"""
Pixel-Set encoder + GRU
"""
def __init__(
self,
input_dim=10,
mlp1=[10, 32, 64],
pooling="mean_std",
mlp2=[128, 128],
with_extra=True,
extra_size=4,
nker=[32, 32, 128],
mlp3=[128, 128],
seq_len=24,
mlp4=[128, 64, 32],
num_classes=20,
max_temporal_shift=100,
max_position=365,
):
super(PseTempCNN, self).__init__()
if with_extra:
mlp2 = deepcopy(mlp2)
mlp2[0] += 4
self.spatial_encoder = PixelSetEncoder(
input_dim,
mlp1=mlp1,
pooling=pooling,
mlp2=mlp2,
with_extra=with_extra,
extra_size=extra_size,
)
self.temporal_encoder = TempConv(
input_size=mlp2[-1],
nker=nker,
seq_len=seq_len,
nfc=mlp3,
max_position=max_position,
max_temporal_shift=max_temporal_shift,
)
self.decoder = get_decoder(mlp4, num_classes)
def forward(self, pixels, mask, positions, extra, return_feats=False):
"""
Args:
input(tuple): (Pixel-Set, Pixel-Mask) or ((Pixel-Set, Pixel-Mask), Extra-features)
Pixel-Set : Batch_size x Sequence length x Channel x Number of pixels
Pixel-Mask : Batch_size x Sequence length x Number of pixels
Positions : Batch_size x Sequence length
Extra-features : Batch_size x Sequence length x Number of features
"""
spatial_feats = self.spatial_encoder(pixels, mask, extra)
temporal_feats = self.temporal_encoder(spatial_feats, positions)
logits = self.decoder(temporal_feats)
if return_feats:
return logits, temporal_feats
else:
return logits
def param_ratio(self):
total = get_ntrainparams(self)
s = get_ntrainparams(self.spatial_encoder)
t = get_ntrainparams(self.temporal_encoder)
c = get_ntrainparams(self.decoder)
print("TOTAL TRAINABLE PARAMETERS : {}".format(total))
print(
"RATIOS: Spatial {:5.1f}% , Temporal {:5.1f}% , Classifier {:5.1f}%".format(
s / total * 100, t / total * 100, c / total * 100
)
)
return total
def get_ntrainparams(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
| 31.114551
| 93
| 0.57194
| 1,162
| 10,050
| 4.740964
| 0.109294
| 0.046288
| 0.029043
| 0.052278
| 0.89617
| 0.88782
| 0.88782
| 0.88782
| 0.88782
| 0.88782
| 0
| 0.044159
| 0.333035
| 10,050
| 322
| 94
| 31.21118
| 0.777711
| 0.155721
| 0
| 0.777328
| 0
| 0.016194
| 0.051395
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.02834
| 0.004049
| 0.149798
| 0.032389
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dd3a34c9dd938987c33755e8991e3ac8b6e891b6
| 99
|
py
|
Python
|
teitoku/intermediate/__init__.py
|
yukinotenshi/teitoku
|
adb54fb7f709e0bac0da6d6f6f8aa00702c2f9c5
|
[
"MIT"
] | null | null | null |
teitoku/intermediate/__init__.py
|
yukinotenshi/teitoku
|
adb54fb7f709e0bac0da6d6f6f8aa00702c2f9c5
|
[
"MIT"
] | null | null | null |
teitoku/intermediate/__init__.py
|
yukinotenshi/teitoku
|
adb54fb7f709e0bac0da6d6f6f8aa00702c2f9c5
|
[
"MIT"
] | 1
|
2020-01-25T10:53:44.000Z
|
2020-01-25T10:53:44.000Z
|
from teitoku.intermediate.request import Request
from teitoku.intermediate.response import Response
| 49.5
| 50
| 0.888889
| 12
| 99
| 7.333333
| 0.5
| 0.25
| 0.522727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070707
| 99
| 2
| 50
| 49.5
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
dd6e7379fb654ebe65ae123fa4174ef78d3771a4
| 35,665
|
py
|
Python
|
bin/fingal/fieldERT.py
|
LutzGross/fingal
|
4b6fcc02871e7ba1a98f37ffd18f1a16a5fe6a48
|
[
"Apache-2.0"
] | null | null | null |
bin/fingal/fieldERT.py
|
LutzGross/fingal
|
4b6fcc02871e7ba1a98f37ffd18f1a16a5fe6a48
|
[
"Apache-2.0"
] | null | null | null |
bin/fingal/fieldERT.py
|
LutzGross/fingal
|
4b6fcc02871e7ba1a98f37ffd18f1a16a5fe6a48
|
[
"Apache-2.0"
] | null | null | null |
"""
tools for ERT inversion including ERT cost functions for inversion
by l.gross@uq.edu.au, 2021
"""
from esys.escript import *
import numpy as np
from esys.downunder import MeteredCostFunction
from esys.escript.linearPDEs import LinearSinglePDE, SolverOptions
from esys.escript.pdetools import Locator, ArithmeticTuple, MaskFromTag, getInfLocator
import logging
from esys.weipa import saveVTK, saveSilo
from .tools import setupERTPDE
lslogger=logging.getLogger('inv.minimizer')
class DCInversionByFieldIntensity(MeteredCostFunction):
"""
cost function for electric field intensity inversion (aka FullWaver)
"""
provides_inverse_Hessian_approximation=True
def __init__(self, domain, data, L_stations=1., w0=0., w1=1., alpha0=1., alpha1=0., sigma0=.001, region_fixed=Data(), stationsFMT="e%s",
weightLogDefect=0.5, adjustStationLocationsToElementCenter=True, logclip=15):
"""
cost function for electric field intensity inversion.
regularization is
int( w0* m^2 + w1*grad(m)^2) where log(sigma/sigma0)=p is given a alpha0*p+alpha1*laplace(p)=m
:domain: pde domain
:data: data, is `fingal.SurveyData`, requires 'E' and - if available - 'RELERR_E'
:sigma0: reference conductivity
:w0: weighting L2 regularization int m^2
:w1: weighting H1 regularization int grad(m)^2
:alpha0: regularization factor
:alpha1: regularization factor
:weightLogDefect: weighting factor for the logarithm defect in the cost funtion.
:region_fixed: mask for fixed conductivity. needs to be set if w1>0 and w0=0 or alpha1> and alpha0=0
:adjustStationLocationsToElementCenter: moves the station locations to match element centers.
:stationsFMT: format used to map station keys k to mesh tags stationsFMT%k or None
:logclip: cliping for p to avoid overflow in conductivity calculation
:L_stations: radius of electric field averaging.
"""
super(DCInversionByFieldIntensity, self).__init__()
assert weightLogDefect >=0 and weightLogDefect<=1, "weightLogDefect needs to be between 0 and 1."
self.datatol=1e-30
self.sigma0=sigma0
self.stationsFMT=stationsFMT
self.weightLogDefect=weightLogDefect
self.logclip=logclip
# setup PDE for forward models (potentials are fixed on all faces except the surface)
self.pde=setupERTPDE(domain)
x=self.pde.getDomain().getX()[0]
y=self.pde.getDomain().getX()[1]
z=self.pde.getDomain().getX()[2]
self.pde.setValue(q=whereZero(x-inf(x))+whereZero(x-sup(x))+ whereZero(y-inf(y))+whereZero(y-sup(y))+whereZero(z-inf(z)))
self.data=data
# when points are adjusted get the element center locations:
adjustmax=0.
if adjustStationLocationsToElementCenter:
station_locations=[]
for s in data.getStationNumeration():
station_locations.append(data.getStationLocation(s))
XStations=Locator(ReducedFunction(domain), station_locations).getX()
########################################################
if getMPIRankWorld() == 0: lslogger.info("building misfit weighting (this will take some time)")
Xcenter=ReducedFunction(domain).getX()
X=Function(domain).getX()
self.misfit = lambda: None
self.misfit.data={}
self.misfit.w={}
for AB in self.data.injectionIterator():
self.misfit.w[AB]=Scalar(0.,X.getFunctionSpace())
self.misfit.data[AB]=Scalar(0.,X.getFunctionSpace())
for M in self.data.getObservationElectrodes():
L_ABS=L_stations
if adjustStationLocationsToElementCenter:
xs=XStations[data.getStationNumber(M)]
adjustmax=max(adjustmax, length(xs-self.data.getStationLocation(M)))
else:
xs=self.data.getStationLocation(M)
mask=whereNegative(interpolate(length(Xcenter-xs)-L_ABS, X.getFunctionSpace()))
for A,B in self.data.getInjections(M):
E=self.data.getFieldIntensityData((A,B,M))
RELERR=self.data.getFieldIntensityRelError((A,B,M))
self.misfit.w[(A,B)].copyWithMask(Scalar(1/RELERR**2,self.misfit.w[AB].getFunctionSpace()), mask) # >0 where data are measured @M
self.misfit.data[(A,B)].copyWithMask(Scalar(E,self.misfit.w[AB].getFunctionSpace()), mask) # data inserted @ M
if getMPIRankWorld() == 0: lslogger.debug("re-scaling of misfit weights:")
for AB in self.data.injectionIterator():
s=integrate(self.misfit.w[AB])
self.misfit.data[AB]+=whereNonPositive(self.misfit.w[AB]) # data inserted @ M
assert s>0, "no observation for dipole %s. Maybe you need to increase the value for L_stations."%(str(AB))
if s > 0:
self.misfit.w[AB]*=1./(s*len(self.misfit.w))
# primary potentials:
if getMPIRankWorld() == 0:
lslogger.info("maximal station adjustment is %e"%adjustmax)
lslogger.info("building primary electric fields (this will take some time)")
self.phi_p=self.getPrimaryElectricPotentials(sigma0)
lslogger.info("Primary potentials for %d injections calculated."%(len(self.phi_p) ))
# this defines the regularization:
self.w0=w0
self.w1=w1
self.alpha0=alpha0
self.alpha1=alpha1
# used for Hessian inverse
self.Hpde=setupERTPDE(domain)
self.Hpde.setValue(A=w1*kronecker(3), D=w0, q=region_fixed)
if self.alpha1 > 0:
self.Spde=setupERTPDE(domain)
self.Spde.setValue(A=self.alpha1*kronecker(3), D=self.alpha0)
if not self.alpha0 > 0:
self.Spde.setValue(q=region_fixed)
else:
self.Spde=None
def getPrimaryElectricPotentials(self, sigma):
"""
return the primary electric potential for all injections (A,B) using conductivity sigma
:sigma: (primary) conductivity distribution
:return: dictonary of injections (A,B)->primary_potential
"""
primary_potential={}
self.pde.setValue(A=sigma*kronecker(3), y_dirac=Data(), Y=Data(), X=Data())
for A in self.data.getListOfInjectionStations():
s=Scalar(0.,DiracDeltaFunctions(self.pde.getDomain()))
if self.stationsFMT is None:
s.setTaggedValue(A,1.)
else:
s.setTaggedValue(self.stationsFMT%A,1.)
self.pde.setValue(y_dirac=s)
primary_potential[A]=self.pde.getSolution()
txt=str(primary_potential[A])
if getMPIRankWorld() == 0:
lslogger.debug("primary potential for injection at %d -> %s"%(A,txt))
return primary_potential
def getSecondaryElectricPotentials(self, sigma, sigma0, primary_potentials):
"""
return the primary electric potential for all injections (A,B) using conductivity sigma
for the primary conductivity sigma0 and potentials
:sigma: (primary) conductivity distribution primary_potentials
:return: dictonary of injections (A,B)->secondary_potential
"""
secondary_potential={}
txt=str(sigma)
if getMPIRankWorld() == 0:
lslogger.debug("getSecondaryElectricPotentials: sigma="+txt)
self.pde.setValue(A=sigma*kronecker(self.pde.getDim()), X=Data(), Y=Data(), y_dirac=Data())
for A in primary_potentials:
self.pde.setValue(X=(sigma0-sigma)*grad(primary_potentials[A]))
secondary_potential[A]=self.pde.getSolution()
return secondary_potential
def optimizeSigma0(self, m):
"""
returns a new conductivity, a scaling factor and new misfit by minimizing the misfit using conductivity sigma=f*sigma0 over
factor f.
"""
raise NotImplemented
def scaleSigma0(self, f=1.):
"""
rescales sigma0 by factor f.
"""
raise NotImplemented
def getSigma(self, m, isSmoothed=False):
"""
return the conductivity for a given property function m. If isSmoothed=True
it is assumed that m is already smoothed by (alpha0*I+alpha1*laplace)^{-1}. Otherwise smoothing is applied.
"""
if not isSmoothed:
if self.Spde :
self.Spde.setValue(Y=m)
p=self.Spde.getSolution()
else:
p=m*self.alpha0
else:
p=m
return self.sigma0*exp(p)
def _getDualProduct(self, m, r):
"""
dual product of gradient `r` with increment `m`. Overwrites `getDualProduct` of `MeteredCostFunction`
"""
return integrate(r[0]*m + inner(r[1], grad(m)))
def _getNorm(self, m):
"""
returns the norm of property function `m`. Overwrites `getNorm` of `MeteredCostFunction`
"""
return Lsup(m)
def _getArguments(self, m):
"""
returns values that are used for both the forward as well as the gradient calculation
"""
if self.Spde:
self.Spde.setValue(Y=m)
p=self.Spde.getSolution()
ppi=clip(interpolate(p, Function(self.pde.getDomain())), minval=-self.logclip, maxval=self.logclip)
else:
ppi=clip(interpolate(m/self.alpha0, Function(self.pde.getDomain())), minval=-self.logclip, maxval=self.logclip)
sigma=self.getSigma(ppi, isSmoothed=True)
txt1, txt2=str(ppi), str(m)
if getMPIRankWorld() == 0:
lslogger.debug("p = %s"%(txt1))
lslogger.debug("m = %s"%(txt2))
secondary_potential=self.getSecondaryElectricPotentials(sigma, self.sigma0, self.phi_p)
return secondary_potential, sigma, ppi
    def _getValue(self, m, *args):
        """
        return the value of the cost function. Overwrites `getValue` of `MeteredCostFunction`

        :param m: property function
        :param args: optional precalculated (secondary_potential, sigma, ppi) as
                     returned by `_getArguments`; recalculated when not given.
        """
        if len(args)==0:
            args=self.getArguments(m)
        secondary_potential=args[0]
        sigma=args[1]
        ppi=args[2]
        mi=interpolate(m, Function(self.pde.getDomain()))
        # regularization terms: H1 part (A1) and L2 part (A0)
        A1=self.w1*integrate(length(grad(m))**2)
        A0=self.w0*integrate(mi**2)
        misfit_log=None
        for A,B in self.data.injectionIterator():
            if misfit_log is None:
                # allocate the accumulators lazily on the function space of the weights:
                misfit_log=Scalar(0.,self.misfit.w[(A,B)].getFunctionSpace() )
                misfit_quad=Scalar(0.,self.misfit.w[(A,B)].getFunctionSpace() )
            # total electric field of dipole (A,B): secondary plus primary contributions
            E_AB=-grad(secondary_potential[A]-secondary_potential[B]+self.phi_p[A]-self.phi_p[B], self.misfit.w[(A,B)].getFunctionSpace())
            # datatol guards against log(0) / division by zero for vanishing fields
            EI_AB=length(E_AB)+self.datatol
            misfit_log+=self.misfit.w[(A,B)]*(log(EI_AB/self.misfit.data[(A,B)]))**2
            misfit_quad+=self.misfit.w[(A,B)] * (1-(EI_AB/self.misfit.data[(A,B)]))**2
        A2=integrate(misfit_log)
        A3=integrate(misfit_quad)
        if getMPIRankWorld() == 0:
            lslogger.debug("L2, H1, misfit quad, log= %e, %e, %e, %e"%(A0/2, A1/2, A3/2, A2/2))
        # blend the quadratic and logarithmic misfits via weightLogDefect:
        return (A0+A1+(1-self.weightLogDefect)*A3+self.weightLogDefect*A2)/2
    def _getGradient(self, m, *args):
        """
        returns the gradient of the cost function. Overwrites `getGradient` of `MeteredCostFunction`

        :param m: property function
        :param args: optional precalculated arguments as returned by `_getArguments`
        """
        if len(args)==0:
            args=self.getArguments(m)
        secondary_potential=args[0]
        sigma=args[1]
        # gradient of the regularization part:
        X=self.w1*grad(m)
        Y=self.w0*interpolate(m, X.getFunctionSpace())
        # reset the PDE for the adjoint solves with the current conductivity:
        self.pde.setValue(A=sigma*kronecker(self.pde.getDim()), X=Data(), Y=Data(), y_dirac=Data())
        Y2=None
        for A, B in self.data.injectionIterator():
            if Y2 is None:
                # lazily allocated on the function space of the misfit weights:
                Y2=Scalar(0.,self.misfit.w[(A,B)].getFunctionSpace() )
            # total field of dipole (A,B) = secondary + primary contributions:
            E_AB=-grad(secondary_potential[A]-secondary_potential[B]+self.phi_p[A]-self.phi_p[B], Y2.getFunctionSpace())
            EI_AB=length(E_AB)+self.datatol
            D=self.misfit.data[(A,B)]
            m_log=log(EI_AB/D)
            m_quad=1-(EI_AB/D)
            # right-hand side of the adjoint problem, blending log and quadratic defects:
            self.pde.setValue(X=(self.misfit.w[(A,B)]*(m_log/(EI_AB**2)*self.weightLogDefect - m_quad/(D*EI_AB)*(1-self.weightLogDefect)) ) * E_AB)
            ustar=self.pde.getSolution()
            Y2-=inner(grad(ustar, E_AB.getFunctionSpace()),E_AB)
        # map the misfit contribution back through the smoothing operator:
        if self.Spde:
            self.Spde.setValue(Y=Y2*sigma)
            Y+=self.Spde.getSolution()
        else:
            Y+=Y2*sigma/self.alpha0
        return ArithmeticTuple(Y, X)
def _getInverseHessianApproximation(self, m, r, *args):
"""
returns an approximation of inverse of the Hessian. Overwrites `getInverseHessianApproximation` of `MeteredCostFunction`
"""
self.Hpde.setValue(X=r[1], Y=r[0])
p=self.Hpde.getSolution()
txt=str(p)
if getMPIRankWorld() == 0:
lslogger.debug("inverse Hessian called. search direction = %s",txt)
return p
class ChargeabilityInversionByField(MeteredCostFunction):
    """
    cost function for chargeability inversion from electric field intensity data (aka FullWaver)
    """
    # this class implements _getInverseHessianApproximation:
    provides_inverse_Hessian_approximation=True
    def __init__(self, domain, data, L_stations=1., w0=0., w1=1., alpha0=1., alpha1=0., gamma0=0.001, sigma=0.001, region_fixed=Data(), stationsFMT="e%s",
                 adjustStationLocationsToElementCenter=True, logclip=15, weightLogDefect=0.):
        """
        cost function for chargeability inversion based on electric fields.
        regularization is
        int( w0* m^2 + w1*grad(m)^2) where log(sigma/sigma0)=p is given as alpha0*p+alpha1*laplace(p)=m
        :domain: pde domain
        :data: data, is `fingal.SurveyData`, requires 'GAMMA' if available 'RELERR_GAMMA'
        :gamma0: reference modified chargeability
        :sigma: conductivity
        :w0: weighting L2 regularization int m^2
        :w1: weighting H1 regularization int grad(m)^2
        :alpha0: regularization factor
        :alpha1: regularization factor
        :weightLogDefect: weighting factor for the logarithm defect in the cost function.
        :region_fixed: mask for fixed conductivity. needs to be set if w1>0 and w0=0 or alpha1>0 and alpha0=0
        :adjustStationLocationsToElementCenter: moves the station locations to match element centers.
        :stationsFMT: format used to map station keys k to mesh tags stationsFMT%k or None
        :logclip: clipping for p to avoid overflow in conductivity calculation
        :L_stations: radius of electric field averaging.
        """
        super(ChargeabilityInversionByField, self).__init__()
        self.datatol=1e-30
        self.logclip=logclip
        if getMPIRankWorld() == 0:
            if weightLogDefect>0:
                # the log defect is not supported by this cost function:
                lslogger.info("weightLogDefect>0 but ignored.")
            lslogger.info("building misfit weighting (this will take some time)")
        self.weightLogDefect=weightLogDefect
        self.sigma=sigma
        self.gamma0=gamma0
        self.useDifferenceOfFields=False
        self.stationsFMT=stationsFMT
        self.misfitFunctionSpace=Function(domain)
        # setup PDE:
        self.pde=setupERTPDE(domain)
        x=self.pde.getDomain().getX()[0]
        y=self.pde.getDomain().getX()[1]
        z=self.pde.getDomain().getX()[2]
        # fix the potential on the lateral faces and at the bottom of the domain:
        self.pde.setValue(q=whereZero(x-inf(x))+whereZero(x-sup(x))+ whereZero(y-inf(y))+whereZero(y-sup(y))+whereZero(z-inf(z)))
        self.data=data
        # when points are adjusted to match element centers:
        adjustmax=0.
        if adjustStationLocationsToElementCenter:
            station_locations=[]
            for s in data.getStationNumeration():
                station_locations.append(data.getStationLocation(s))
            XStations=Locator(ReducedFunction(domain), station_locations).getX()
        ########################################################
        Xcenter=ReducedFunction(domain).getX()
        X=self.misfitFunctionSpace.getX()
        # the misfit container is a plain namespace object holding data and weights:
        self.misfit = lambda: None
        self.misfit.data={}
        self.misfit.w={}
        for AB in self.data.injectionIterator():
            self.misfit.w[AB]=Scalar(0.,self.misfitFunctionSpace)
            self.misfit.data[AB]=Scalar(0.,self.misfitFunctionSpace)
        for M in self.data.getObservationElectrodes():
            L_ABS=L_stations
            if adjustStationLocationsToElementCenter:
                xs=XStations[data.getStationNumber(M)]
                adjustmax=max(adjustmax, length(xs-self.data.getStationLocation(M)))
            else:
                xs=self.data.getStationLocation(M)
            # mask of the ball of radius L_ABS around station M:
            mask=whereNegative(interpolate(length(Xcenter-xs)-L_ABS, self.misfitFunctionSpace))
            for A,B in self.data.getInjections(M):
                GAMMA=self.data.getModifiedChargeabilityData((A,B,M))
                RELERR=self.data.getModifiedChargeabilityRelError((A,B,M))
                if abs(GAMMA) > 0.:
                    self.misfit.w[(A,B)].copyWithMask(Scalar(1./RELERR**2,self.misfitFunctionSpace), mask) # 1 where data are measured @M
                    self.misfit.data[(A,B)].copyWithMask(Scalar(GAMMA,self.misfitFunctionSpace), mask) # data inserted @ M
        if adjustStationLocationsToElementCenter and getMPIRankWorld() == 0:
            lslogger.info("maximal station adjustment is %e"%adjustmax)
        if getMPIRankWorld() == 0:
            lslogger.debug("rescaling of misfit weights:")
        for AB in self.data.injectionIterator():
            self.misfit.data[AB]+=whereNonPositive(self.misfit.w[AB]) # insert 1's to avoid division by zero
            s=integrate(self.misfit.w[AB])
            assert s>0, "no observation for dipole %s. Maybe you need to increase the value for L_stations."%(str(AB))
            if s > 0:
                self.misfit.w[AB]*=1./(s*len(self.misfit.w))
        # primary potentials:
        self.phi_p=self.getElectricPotentials(self.sigma)
        #self.secondary_potential=self.getSecondaryElectricPotentials(self.sigma, self.sigma0, self.phi_p)
        self.w0=w0
        self.w1=w1
        self.alpha0=alpha0
        self.alpha1=alpha1
        # used for Hessian inverse
        self.Hpde=setupERTPDE(domain)
        self.Hpde.setValue(A=w1*kronecker(3), D=w0, q=region_fixed)
        self.Spde=None
        if self.alpha1 > 0:
            # smoothing PDE (alpha0*I+alpha1*laplace) for the property function:
            self.Spde=setupERTPDE(domain)
            self.Spde.setValue(A=self.alpha1*kronecker(3), D=self.alpha0)
            if not self.alpha0>0:
                self.Spde.setValue(q=region_fixed)
    def getElectricPotentials(self, sigma):
        """
        return the primary electric potentials for the injections (A,B) for conductivity sigma

        :param sigma: conductivity distribution
        :return: dictionary injection station -> potential
        """
        potential={}
        self.pde.setValue(A=sigma*kronecker(3), X=Data(), Y=Data(), y_dirac=Data())
        for A in self.data.getListOfInjectionStations():
            # unit point source at injection station A:
            s=Scalar(0.,DiracDeltaFunctions(self.pde.getDomain()))
            if self.stationsFMT is None:
                s.setTaggedValue(A,1.)
            else:
                s.setTaggedValue(self.stationsFMT%A,1.)
            self.pde.setValue(y_dirac=s)
            potential[A]=self.pde.getSolution()
            txt=str(potential[A])
            if getMPIRankWorld() == 0:
                lslogger.debug("primary potential for injection at %d -> %s"%(A,txt))
        if getMPIRankWorld() == 0:
            lslogger.debug("primary potential for %s injection calculated."%len(potential))
        return potential
    def getSecondaryElectricPotentials(self, gamma, primary_potentials):
        """
        return the secondary electric potentials for the injections (A,B) for conductivity sigma/(1+gamma)

        :param gamma: modified chargeability
        :param primary_potentials: dictionary injection station -> primary potential
        :return: dictionary injection station -> secondary potential
        """
        secondary_potential={}
        self.pde.setValue(A=self.sigma/(1+gamma)*kronecker(3), X=Data(), Y=Data(), y_dirac=Data())
        for A in primary_potentials:
            self.pde.setValue(X=self.sigma*gamma/(1+gamma)*grad(primary_potentials[A]))
            secondary_potential[A]=self.pde.getSolution()
        if getMPIRankWorld() == 0:
            lslogger.debug("%s secondary potential calculated"%(len(secondary_potential)))
        return secondary_potential
    def getChargeability(self, m, isSmoothed=False):
        """
        return chargeability (eta) for a given property function m.
        If isSmoothed=True it is assumed that m is already smoothed by (alpha0*I+alpha1*laplace)^{-1}. Otherwise smoothing is applied.
        """
        gamma=self.getGamma(m, isSmoothed)
        # eta = gamma/(1+gamma) by definition of the modified chargeability:
        return gamma/(1.+gamma)
    def getGamma(self, m, isSmoothed=False):
        """
        return modified chargeability (gamma) for a given property function m
        If isSmoothed=True it is assumed that m is already smoothed by (alpha0*I+alpha1*laplace)^{-1}. Otherwise smoothing is applied.
        """
        if not isSmoothed:
            if self.Spde :
                self.Spde.setValue(Y=m)
                p=self.Spde.getSolution()
            else:
                # without the Laplacian term the smoothing reduces to division by alpha0:
                p=m/self.alpha0
        else:
            p=m
        # clip to avoid overflow in exp():
        gamma=self.gamma0*exp(clip(p, minval=-self.logclip, maxval=self.logclip))
        return gamma
    def _getDualProduct(self, m, r):
        """
        dual product of gradient `r` with increment `m`. Overwrites `getDualProduct` of `MeteredCostFunction`
        """
        return integrate(r[0]*m + inner(r[1], grad(m)))
    def _getNorm(self, m):
        """
        returns the norm of property function `m`. Overwrites `getNorm` of `MeteredCostFunction`
        """
        return Lsup(m)
    def _getArguments(self, m):
        """
        returns values that are used for both the forward as well as the gradient calculation
        """
        if self.Spde :
            self.Spde.setValue(Y=m)
            p=self.Spde.getSolution()
        else:
            p=m/self.alpha0
        gamma=self.getGamma(p, isSmoothed=True)
        gammai=interpolate(gamma, Function(self.pde.getDomain()))
        secondary_potential=self.getSecondaryElectricPotentials(gammai, self.phi_p)
        return gammai, secondary_potential,
    def _getValue(self, m, *args):
        """
        return the value of the cost function. Overwrites `getValue` of `MeteredCostFunction`

        :param m: property function
        :param args: optional precalculated (gammai, secondary_potential)
        """
        if len(args)==0:
            args=self.getArguments(m)
        gammai=args[0]
        secondary_potential=args[1]
        mi=interpolate(m, Function(self.pde.getDomain()))
        # regularization terms: H1 part (A1) and L2 part (A0)
        A1=self.w1*integrate(length(grad(m))**2)
        A0=self.w0*integrate(mi**2)
        misfit=Scalar(0.,self.misfitFunctionSpace )
        for A, B in self.data.injectionIterator():
            # primary field E and secondary field DE of dipole (A,B):
            E =-grad(self.phi_p[A]-self.phi_p[B], misfit.getFunctionSpace())
            DE=-grad(secondary_potential[A]-secondary_potential[B], misfit.getFunctionSpace())
            L_E2=length(E)**2
            # quadratic defect of the projected field ratio vs. the gamma data:
            mfquad=1 - safeDiv( inner(DE, E), self.misfit.data[(A,B)]*L_E2)
            misfit+=self.misfit.w[(A,B)]*mfquad**2
        A2=integrate(misfit)
        if lslogger.isEnabledFor(logging.DEBUG):
            strgamma=str(gammai)
            strm=str(mi)
            if getMPIRankWorld() == 0:
                lslogger.debug("gamma = %s"%strgamma)
                lslogger.debug("m = %s"%strm)
                lslogger.debug("L2, H1, misfit quad = %e, %e, %e"%(A0/2, A1/2, A2/2))
        return (A0+A1+A2)/2
    def _getGradient(self, m, *args):
        """
        returns the gradient of the cost function. Overwrites `getGradient` of `MeteredCostFunction`

        :param m: property function
        :param args: optional precalculated (gammai, secondary_potential)
        """
        if len(args)==0:
            args=self.getArguments(m)
        gammai=args[0]
        secondary_potential=args[1]
        mi=interpolate(m, Function(self.pde.getDomain()))
        # gradient of the regularization part:
        X=self.w1*grad(m)
        Y=self.w0*mi
        # reset the PDE for the adjoint solves:
        self.pde.setValue(A=self.sigma/(1+gammai)*kronecker(3), X=Data(), Y=Data(), y_dirac=Data())
        Y2=Scalar(0.,self.misfitFunctionSpace )
        for A, B in self.data.injectionIterator():
            E =-grad(self.phi_p[A]-self.phi_p[B], Y2.getFunctionSpace())
            DE=-grad(secondary_potential[A]-secondary_potential[B], Y2.getFunctionSpace())
            L_E2=length(E)**2
            mfquad=1 - safeDiv( inner(DE, E), self.misfit.data[(A,B)]*L_E2)
            #self.pde.setValue(X=self.misfit.w[(A,B)]*(self.misfit.data[(A,B)]*L_E2 - inner(DE, E))/(L_E2+self.datatol**2)**2* E)
            # right-hand side of the adjoint problem for dipole (A,B):
            self.pde.setValue(X=self.misfit.w[(A,B)]*safeDiv(mfquad, self.misfit.data[(A,B)]*L_E2) * E)
            ustar=self.pde.getSolution()
            Y2+=-inner(grad(ustar, E.getFunctionSpace()),E+DE)
        # map the misfit contribution back through the smoothing operator;
        # the factor gammai/(1+gammai)**2*sigma is d(sigma/(1+gamma))/dp:
        if self.Spde:
            self.Spde.setValue(Y=Y2*gammai/(1+gammai)**2*self.sigma)
            Y+=self.Spde.getSolution()
        else:
            Y+=Y2*gammai*self.sigma/self.alpha0
        return ArithmeticTuple(Y, X)
    def _getInverseHessianApproximation(self, m, r, *args):
        """
        returns an approximation of inverse of the Hessian. Overwrites `getInverseHessianApproximation` of `MeteredCostFunction`
        """
        self.Hpde.setValue(X=r[1], Y=r[0])
        p=self.Hpde.getSolution()
        txt=str(p)
        if getMPIRankWorld() == 0:
            lslogger.debug("inverse Hessian called. search direction = %s",txt)
        return p
class DCInversionByField(MeteredCostFunction):
    """
    cost function for electric field intensity inversion (aka FullWaver)
    """
    provides_inverse_Hessian_approximation=True
    def __init__(self, domain, data, L_stations=1., w0=0., w1=1., alpha0=1., alpha1=0., sigma0=.001, region_fixed=Data(), stationsFMT="e%s",
                 adjustStationLocationsToElementCenter=True, useLogDefect=True, logclip=15):
        """
        cost function for ERT inversion from measured electric fields
        :domain: pde domain
        :data: data, is ERTSurveyData object supporting makePrediction
        :L_stations: radius of electric field averaging around each observation station
        :w0: weighting L2 regularization
        :w1: weighting H1 regularization
        :alpha0: regularization factor of the property-function smoothing
        :alpha1: regularization factor of the property-function smoothing
        :sigma0: reference conductivity
        :region_fixed: mask for fixed conductivities
        :stationsFMT: format used to map station keys k to mesh tags stationsFMT%k or None
        :adjustStationLocationsToElementCenter: moves the station locations to match element centers.
        :useLogDefect: if True a logarithmic misfit is used.
        :logclip: clipping for the smoothed property function to avoid overflow in
                  the conductivity calculation (new, backward-compatible parameter).
        """
        # BUG FIX: the original called super(FieldInversion, self).__init__() which
        # names a different class; use this class so the MRO is walked correctly.
        super(DCInversionByField, self).__init__()
        self.datatol=1e-30
        # BUG FIX: self.logclip is read in _getArguments but was never set, which
        # raised an AttributeError at the first evaluation.
        self.logclip=logclip
        self.sigma0=sigma0
        self.stationsFMT=stationsFMT
        self.useLogDefect=useLogDefect
        if self.useLogDefect:
            lslogger.info("Misfit is using logarithm.")
        else:
            lslogger.info("Misfit is using norm relative difference.")
        # setup PDE:
        self.pde=setupERTPDE(domain)
        x=self.pde.getDomain().getX()[0]
        y=self.pde.getDomain().getX()[1]
        z=self.pde.getDomain().getX()[2]
        # fix the potential on the lateral faces and at the bottom of the domain:
        self.pde.setValue(q=whereZero(x-inf(x))+whereZero(x-sup(x))+ whereZero(y-inf(y))+whereZero(y-sup(y))+whereZero(z-inf(z)))
        self.data=data
        # when points are adjusted:
        adjustmax=0.
        if adjustStationLocationsToElementCenter:
            station_locations=[]
            for s in data.getStationNumeration():
                station_locations.append(data.getStationLocation(s))
            XStations=Locator(ReducedFunction(domain), station_locations).getX()
        ########################################################
        lslogger.info("building misfit weighting (this will take some time)")
        Xcenter=ReducedFunction(domain).getX()
        X=Function(domain).getX()
        # plain namespace object holding per-dipole data and weights:
        self.misfit = lambda: None
        self.misfit.data={}
        self.misfit.w={}
        for AB in self.data.injectionIterator():
            self.misfit.w[AB]=Scalar(0.,X.getFunctionSpace())
            self.misfit.data[AB]=Vector(0.,X.getFunctionSpace())
        for M in self.data.getObservationElectrodes():
            L_ABS=L_stations
            if adjustStationLocationsToElementCenter:
                xs=XStations[data.getStationNumber(M)]
                adjustmax=max(adjustmax, length(xs-self.data.getStationLocation(M)))
            else:
                xs=self.data.getStationLocation(M)
            # mask of the ball of radius L_ABS around station M:
            mask=whereNegative(interpolate(length(Xcenter-xs)-L_ABS, X.getFunctionSpace()))
            for A,B in self.data.getInjections(M):
                E0, E1, E2=self.data.getFieldData((A,B,M))
                n=E0**2+E1**2+E2**2
                if n > 0:
                    self.misfit.w[(A,B)].copyWithMask(Scalar(1./n,self.misfit.w[AB].getFunctionSpace()), mask) # 1 where data are measured @M
                    self.misfit.data[(A,B)].copyWithMask(Vector((E0, E1, E2), self.misfit.w[AB].getFunctionSpace()), mask*[1,1,1]) # data inserted @ M
        lslogger.debug("rescaling of misfit weights:")
        for AB in self.data.injectionIterator():
            s=integrate(self.misfit.w[AB]*length(self.misfit.data[AB])**2)
            assert s>0, "no observation for dipole %s. Maybe you need to increase the value for L_stations."%(str(AB))
            if s > 0:
                self.misfit.w[AB]*=1./(s*len(self.misfit.w))
        # primary potentials:
        lslogger.info("maximal station adjustment is %e"%adjustmax)
        lslogger.info("building primary electric fields (this will take some time)")
        self.phi_p=self.getPrimaryElectricPotentials(sigma0)
        lslogger.info("Primary potentials for %d injections calculated."%(len(self.phi_p) ))
        self.w0=w0
        self.w1=w1
        self.alpha0=alpha0
        self.alpha1=alpha1
        # used for Hessian inverse
        self.Hpde=setupERTPDE(domain, poisson=(abs(w1)>0) )
        self.Hpde.setValue(A=w1*kronecker(3), D=w0, q=region_fixed)
        if self.alpha1 > 0:
            # smoothing PDE (alpha0*I+alpha1*laplace) for the property function:
            self.Spde=setupERTPDE(domain)
            self.Spde.setValue(A=self.alpha1*kronecker(3), D=self.alpha0)
            if not self.alpha0 > 0:
                self.Spde.setValue(q=region_fixed)
        else:
            self.Spde=None
    def getPrimaryElectricPotentials(self, sigma):
        """
        return the primary electric potentials for all injection stations
        for conductivity `sigma`.
        """
        primary_potential={}
        self.pde.setValue(A=sigma*kronecker(3), y_dirac=Data(), Y=Data(), X=Data())
        for A in self.data.getListOfInjectionStations():
            # unit point source at injection station A:
            s=Scalar(0.,DiracDeltaFunctions(self.pde.getDomain()))
            if self.stationsFMT is None:
                s.setTaggedValue(A,1.)
            else:
                s.setTaggedValue(self.stationsFMT%A,1.)
            self.pde.setValue(y_dirac=s)
            primary_potential[A]=self.pde.getSolution()
            lslogger.debug("primary potential for injection at %d -> %s"%(A,str(primary_potential[A])))
        return primary_potential
    def getSecondaryElectricPotentials(self, sigma, sigma0, primary_potentials):
        """
        return the secondary electric potentials for all injections for
        conductivity `sigma`, given the primary potentials for `sigma0`.
        """
        secondary_potential={}
        # CONSISTENCY FIX: use the module logger instead of a bare print, matching
        # the logging style used everywhere else in this file.
        lslogger.debug("getSecondaryElectricPotentials: sigma=%s"%str(sigma))
        self.pde.setValue(A=sigma*kronecker(self.pde.getDim()), X=Data(), Y=Data(), y_dirac=Data())
        for A in primary_potentials:
            self.pde.setValue(X=(sigma0-sigma)*grad(primary_potentials[A]))
            secondary_potential[A]=self.pde.getSolution()
        return secondary_potential
    def getSigma(self, m, isSmoothed=False):
        """
        return the conductivity for a given property function m. If isSmoothed=True
        it is assumed that m is already smoothed by (alpha0*I+alpha1*laplace)^{-1};
        otherwise smoothing is applied.
        """
        if not isSmoothed:
            if self.Spde :
                self.Spde.setValue(Y=m)
                p=self.Spde.getSolution()
            else:
                # BUG FIX (consistency): alpha0*p=m gives p=m/alpha0, matching
                # _getArguments below; the original multiplied by alpha0.
                p=m/self.alpha0
        else:
            p=m
        return self.sigma0*exp(p)
    def _getDualProduct(self, m, r):
        """
        dual product of gradient `r` with increment `m`. Overwrites `getDualProduct` of `MeteredCostFunction`
        """
        return integrate(r[0]*m + inner(r[1], grad(m)))
    def _getNorm(self, m):
        """
        returns the sup-norm of property function `m`. Overwrites `getNorm` of `MeteredCostFunction`
        """
        return Lsup(m)
    def _getArguments(self, m):
        """
        returns values used by both the forward and the gradient calculation:
        the secondary potentials, the conductivity and the clipped smoothed m.
        """
        if self.Spde:
            self.Spde.setValue(Y=m)
            p=self.Spde.getSolution()
            ppi=clip(interpolate(p, Function(self.pde.getDomain())), minval=-self.logclip, maxval=self.logclip)
        else:
            ppi=clip(interpolate(m/self.alpha0, Function(self.pde.getDomain())), minval=-self.logclip, maxval=self.logclip)
        sigma=self.getSigma(ppi, isSmoothed=True)
        secondary_potential=self.getSecondaryElectricPotentials(sigma, self.sigma0, self.phi_p)
        return secondary_potential, sigma, ppi
    def _getValue(self, m, *args):
        """
        return the value of the cost function. Overwrites `getValue` of `MeteredCostFunction`
        """
        if len(args)==0:
            args=self.getArguments(m)
        secondary_potential=args[0]
        sigma=args[1]
        ppi=args[2]
        mi=interpolate(m, Function(self.pde.getDomain()))
        # regularization terms: H1 part (A1) and L2 part (A0)
        A1=self.w1*integrate(length(grad(m))**2)
        A0=self.w0*integrate(mi**2)
        misfit=None
        for A,B in self.data.injectionIterator():
            if misfit is None:
                # allocate the accumulator lazily on the function space of the weights:
                misfit=Scalar(0.,self.misfit.w[(A,B)].getFunctionSpace() )
            # total field of dipole (A,B) = secondary + primary contributions:
            E_AB=-grad(secondary_potential[A]-secondary_potential[B]+self.phi_p[A]-self.phi_p[B], self.misfit.w[(A,B)].getFunctionSpace())
            diff=self.misfit.data[(A,B)]-E_AB
            misfit+=self.misfit.w[(A,B)] * length(diff)**2
        A2=integrate(misfit)
        lslogger.info("sigma = %s"%(str(sigma)))
        lslogger.debug("p = %s"%(str(ppi)))
        lslogger.debug("m = %s"%(str(m)))
        lslogger.debug("L2, H1, misfit= %e, %e, %e"%(A0/2, A1/2, A2/2))
        return (A0+A1+A2)/2
    def _getGradient(self, m, *args):
        """
        returns the gradient of the cost function. Overwrites `getGradient` of `MeteredCostFunction`
        """
        if len(args)==0:
            args=self.getArguments(m)
        secondary_potential=args[0]
        sigma=args[1]
        # gradient of the regularization part:
        X=self.w1*grad(m)
        Y=self.w0*interpolate(m, X.getFunctionSpace())
        # reset the PDE for the adjoint solves with the current conductivity:
        self.pde.setValue(A=sigma*kronecker(self.pde.getDim()), X=Data(), Y=Data(), y_dirac=Data())
        Y2=None
        for A, B in self.data.injectionIterator():
            if Y2 is None:
                Y2=Scalar(0.,self.misfit.w[(A,B)].getFunctionSpace() )
            E_AB=-grad(secondary_potential[A]-secondary_potential[B]+self.phi_p[A]-self.phi_p[B], Y2.getFunctionSpace())
            diff=self.misfit.data[(A,B)]-E_AB
            # right-hand side of the adjoint problem for dipole (A,B):
            self.pde.setValue(X=self.misfit.w[(A,B)]*diff )
            ustar=self.pde.getSolution()
            Y2+=inner(grad(ustar, E_AB.getFunctionSpace()),E_AB)
        # map the misfit contribution back through the smoothing operator:
        if self.Spde:
            self.Spde.setValue(Y=Y2*sigma)
            Y+=self.Spde.getSolution()
        else:
            Y+=Y2*sigma/self.alpha0
        return ArithmeticTuple(Y, X)
    def _getInverseHessianApproximation(self, m, r, *args):
        """
        returns an approximation of the inverse of the Hessian applied to `r`.
        Overwrites `getInverseHessianApproximation` of `MeteredCostFunction`
        """
        self.Hpde.setValue(X=r[1], Y=r[0])
        p=self.Hpde.getSolution()
        lslogger.debug("inverse Hessian called. search direction = %s",p)
        return p
| 43.388078
| 156
| 0.602383
| 4,270
| 35,665
| 4.980094
| 0.085948
| 0.032918
| 0.021726
| 0.010722
| 0.816694
| 0.787867
| 0.764637
| 0.751046
| 0.731719
| 0.699882
| 0
| 0.018029
| 0.270601
| 35,665
| 821
| 157
| 43.440926
| 0.799416
| 0.182981
| 0
| 0.756007
| 0
| 0
| 0.054784
| 0.002224
| 0
| 0
| 0
| 0
| 0.007394
| 1
| 0.060998
| false
| 0
| 0.014787
| 0.003697
| 0.138632
| 0.001848
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
06fc4eba810668b2d11ff7c5397013dfe56f2902
| 276
|
py
|
Python
|
artgate/platform/__init__.py
|
Fassty/artgate
|
f1f853e9eec985fcd883dd27a0a5f6a610660e50
|
[
"MIT"
] | null | null | null |
artgate/platform/__init__.py
|
Fassty/artgate
|
f1f853e9eec985fcd883dd27a0a5f6a610660e50
|
[
"MIT"
] | null | null | null |
artgate/platform/__init__.py
|
Fassty/artgate
|
f1f853e9eec985fcd883dd27a0a5f6a610660e50
|
[
"MIT"
] | null | null | null |
from artgate.platform.base import AbstractEnvConnector
from artgate.platform.android import *
from artgate.platform.ios import *
from artgate.platform.linux import LinuxEnvConnector
from artgate.platform.macos import *
from artgate.platform.windows import WindowsEnvConnector
| 39.428571
| 56
| 0.858696
| 33
| 276
| 7.181818
| 0.393939
| 0.278481
| 0.481013
| 0.316456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 276
| 6
| 57
| 46
| 0.940476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b0c71893ac8c17de056e56914b10e1592e6d3da7
| 1,215
|
py
|
Python
|
tests/test_nuples.py
|
Contexte/typographeur
|
f4220ef329245b375a65e486ab0b8a93afcd219a
|
[
"MIT"
] | 14
|
2018-06-15T09:28:32.000Z
|
2021-08-02T09:21:42.000Z
|
tests/test_nuples.py
|
Contexte/typographeur
|
f4220ef329245b375a65e486ab0b8a93afcd219a
|
[
"MIT"
] | 21
|
2018-06-15T12:35:58.000Z
|
2021-02-24T22:22:27.000Z
|
tests/test_nuples.py
|
Contexte/typographeur
|
f4220ef329245b375a65e486ab0b8a93afcd219a
|
[
"MIT"
] | 2
|
2020-06-25T14:42:09.000Z
|
2021-02-08T16:06:42.000Z
|
import pytest
from typographeur import typographeur
@pytest.mark.parametrize("input,expected", [
    ('hello???', 'hello ???'),
    ('hello ???', 'hello ???'),
    ('hello ???', 'hello ???'), # Fine insecable
    ('hello ??', 'hello ???'),
    ('hello ??????', 'hello ???'),
])
def test_triple_question(input, expected):
    # NOTE(review): the expected strings appear to use a (narrow) no-break space
    # before the punctuation, per French typography — confirm encoding is preserved.
    output = typographeur(input)
    assert output == expected
@pytest.mark.parametrize("input,expected", [
    ('hello!!!', 'hello !!!'),
    ('hello !!!', 'hello !!!'),
    ('hello !!!', 'hello !!!'), # Fine insecable
    ('hello !!', 'hello !!!'),
    ('hello !!!!!', 'hello !!!'),
])
def test_triple_exclamation(input, expected):
    # Runs of '!' are normalised to a spaced triple exclamation mark.
    output = typographeur(input)
    assert output == expected
# Let's agree on something: this kind of writing doesn't exist.
@pytest.mark.parametrize("input,expected", [
    ('hello;;;', 'hello ;;;'),
    ('hello ;;;', 'hello ;;;'),
    ('hello ;;;', 'hello ;;;'), # Fine insecable
    ('hello ;;', 'hello ;;'),
    ('hello ;;;;;', 'hello ;;;;;'),
])
def test_triple_semicolon(input, expected):
    # Unlike '?' and '!', runs of ';' other than exactly three are left untouched.
    output = typographeur(input)
    assert output == expected
| 30.375
| 64
| 0.565432
| 120
| 1,215
| 5.675
| 0.266667
| 0.220264
| 0.30837
| 0.251101
| 0.819383
| 0.819383
| 0.819383
| 0.819383
| 0.572687
| 0.572687
| 0
| 0.059113
| 0.164609
| 1,215
| 39
| 65
| 31.153846
| 0.611823
| 0.088066
| 0
| 0.65625
| 0
| 0
| 0.366274
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b0c9de14e032959394e67af8d47dcec635056043
| 20,561
|
py
|
Python
|
TO.py
|
LHoBiz/ols_engine
|
9bdbd827f7be17aee95d416255a7f483472c4315
|
[
"MIT"
] | 1
|
2022-01-05T07:38:06.000Z
|
2022-01-05T07:38:06.000Z
|
TO.py
|
LHoBiz/ols_engine
|
9bdbd827f7be17aee95d416255a7f483472c4315
|
[
"MIT"
] | null | null | null |
TO.py
|
LHoBiz/ols_engine
|
9bdbd827f7be17aee95d416255a7f483472c4315
|
[
"MIT"
] | null | null | null |
# -*- coding: cp1252 -*-
import math
import OLSDims
import EnvSettings
from osgeo import osr
import mdl
# Read the user input / settings once; `ip` carries all runway and OLS parameters.
ip = mdl.Data()
NTOL=ip.NTOL
STOL=ip.STOL
AppOLSNAME=OLSDims.AppDim.AppOLSNAME
AppOLSDIMS=OLSDims.AppDim.AppOLSDIMS
TOOLSNAME=OLSDims.TODim.TOOLSNAME
# `f` is the already-open KML output file handle provided by the input object.
f=ip.f
ToOLS = OLSDims.TODim.ToOLS
AppOLSNAME=OLSDims.AppDim.AppOLSNAME
AppOLSDIMS=OLSDims.AppDim.AppOLSDIMS
NRunwayInfo=ip.NRunwayInfo
SRunwayInfo=ip.SRunwayInfo
# Northern runway end: instrument flag and, if applicable, precision settings.
NIns = ip.NIns
if NIns == 'Y':
    NPrc=ip.NPrc
    if NPrc != 'N':
        NBLDist=ip.NBLDist
CN = ip.CN
# NOTE(review): DayOnly is read from ip.CN — likely a copy/paste error for
# ip.DayOnly; confirm against the input definition in mdl.
DayOnly = ip.CN
CL=ip.CL
RED=ip.RED
NMTOW5700kg = ip.NMTOW5700kg
NMTOW22700kg=ip.NMTOW22700kg
SMTOW5700kg = ip.SMTOW5700kg
SMTOW22700kg=ip.SMTOW22700kg
RPT = ip.RPT
# Southern runway end: instrument flag and, if applicable, precision settings.
SIns = ip.SIns
if SIns == 'Y':
    SPrc=ip.SPrc
    if SPrc != 'N':
        SBLDist=ip.SBLDist
RPT = ip.RPT
RWY_WID=ip.RWY_WID
RSW=ip.RSW
NE=ip.NE
SE=ip.SE
# Threshold coordinates (easting/northing) of the north and south runway ends.
NTE=ip.NTE
NTN=ip.NTN
STE=ip.STE
STN=ip.STN
ARP=ip.ARP
SE=ip.SE
NE=ip.NE
NTOAlt=ip.NTOAlt
STOAlt=ip.STOAlt
STOTurn15d=ip.STOTurn15d
NTOTurn15d=ip.NTOTurn15d
zone=ip.zone
KML_NAME=ip.KML_NAME
completeName=ip.completeName
##STOInEdge=ip.STOInEdge
##NTOInEdge=ip.NTOInEdge
# Runway length from the two threshold coordinates.
RwyLen = math.sqrt((NTE-STE)*(NTE-STE) + (NTN-STN)*(NTN-STN))
NCLWY=ip.NCLWY
SCLWY=ip.SCLWY
def NorthTO(NToOls,accur):
    """
    rasterizes the take-off climb surface for the northern runway end and writes
    it as KML polygons (two mirrored halves) to the global output file `f`.

    :NToOls: take-off OLS dimension table; indices used here are
             [0]=inner edge, [1]=clearway length, [2]=divergence, [3]=outer edge,
             [4]=length, [5]=slope — assumed from usage, confirm against OLSDims.
    :accur: grid step used to rasterize the surface
    """
    ToOls=NToOls
    Div = ToOls[2][0]
    Slope = ToOls[5][0]
    s = []
    Square = []
    Elev = NE
    ToOls=NToOls
    TOTurn15d=NTOTurn15d
    TOAlt = NTOAlt
    TOL = ToOls[4][0]
    MTOW22700kg = NMTOW22700kg
    Ins=NIns
    ToOls[1][0] = NCLWY
    # rule-based inner/outer edge widths depending on aircraft mass and
    # alternative take-off procedures:
    if MTOW22700kg == 'N' and TOAlt == 'N':
        innEdge = mdl.F_M(150,1)
    if MTOW22700kg == 'Y' and TOAlt == 'N':
        innEdge = mdl.F_M(250,1)
    if TOAlt == 'N':
        if TOTurn15d == 'N':
            if TOL*Slope + innEdge/2 < mdl.F_M(1000,1):
                outEdge = TOL*Slope + innEdge/2
            elif TOL*Slope + innEdge/2 >= mdl.F_M(1000,1):
                outEdge = mdl.F_M(1000,1)
        if TOTurn15d == 'Y':
            print ('Stop - another method is required to determine take-off area')
    if TOAlt == 'Y':
        if Ins == 'Y' or TOTurn15d == 'Y':
            outEdge = mdl.F_M(900,1)
        else:
            outEdge = mdl.F_M(600,1)
        innEdge = mdl.F_M(90,1)
    # NOTE(review): the values computed above are unconditionally overwritten
    # here by the table values — the rule-based block appears to be dead code;
    # confirm whether this is intentional.
    innEdge = ToOls[0][0]
    outEdge = ToOls[3][0]
    # J indexes steps along the surface, I across half of it:
    J = range(1+int(math.ceil(TOL/mdl.iN(accur))))
    I = range(1+int(math.ceil((outEdge/2)/mdl.iS(accur))))
    for i in I:
        K = []
        T = []
        for j in J:
            # D1 is the distance at which the diverging edges reach the outer edge:
            D1 = ((outEdge-innEdge)/2)/Div + ToOls[1][0]
            D10 = D1 - accur
            D11 = D1 + accur
            D = (TOL+ToOls[1][0]) - j*accur
            Dm1= (TOL+ToOls[1][0]) - (j-1)*accur
            Dp1= (TOL+ToOls[1][0]) - (j+1)*accur
            # surface height H and half-width L at distance D:
            H = Slope * (D-ToOls[1][0]) + NE
            L = (innEdge/2)+(Div*(D-ToOls[1][0])) - i*accur
            L1 = (innEdge/2)+(Div*(D1-ToOls[1][0])) - i*accur
            H1 = Slope * (D1-ToOls[1][0]) + NE
            if L > 0 and outEdge/2 - i*accur > 0:
                ## area 1
                if D > D11:
                    if L >= outEdge/2 - i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D,L,H])
                ## area 2
                if D <= D11 and D > D1:
                    if L >= outEdge/2- i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D,L,H])
                    K.append([D1,L1,H1])
                ## area 3
                if D <= D1 and D > D10:
                    if L >= outEdge/2- i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D1,L1,H1])
                    K.append([D,L,H])
                ## area 4
                if D <= D10 and D > ToOls[1][0]:
                    if L >= outEdge/2- i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D,L,H])
                ## area 5
                if D <= ToOls[1][0]:
                    if L >= outEdge/2- i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D,L,H])
                    K.append([ToOls[1][0],innEdge/2,Elev])
            if L <= 0 or outEdge/2 - i*accur <= 0:
                # outside the half-width: clamp to the centerline
                L = 0
                L1 = 0
                if D > D11:
                    K.append([D,L,H])
                ## area 2
                if D <= D11 and D > D1:
                    K.append([D,L,H])
                    K.append([D1,L1,H1])
                ## area 3
                if D <= D1 and D > D10:
                    K.append([D1,L1,H1])
                    K.append([D,L,H])
                ## area 4
                if D <= D10 and D > ToOls[1][0]:
                    K.append([D,L,H])
                ## area 5
                if D <= ToOls[1][0]:
                    K.append([D,L,H])
                    K.append([ToOls[1][0],innEdge/2,Elev])
                if L == 0:
                    T.append(j)
        s.append(K)
        if len(T) > 0:
            # shorten the along-track range once the centerline has been reached:
            J = range(T[0]+1)
    # F selects the mirrored halves of the surface (left/right of centerline):
    F = [1,-1]
    for n in range(2):
        #folder
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '<Folder>\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '<ScreenOverlay>\n')
        f.write( '<name>Runway: Code '+str(int(CN))+CL+NRunwayInfo+'</name>\n')
        f.write( '<visibility>0</visibility>\n')
        f.write('<overlayXY x="0" y="0" xunits="fraction" yunits="fraction"/>\n')
        f.write('<screenXY x="25" y="95" xunits="pixels" yunits="pixels"/>\n')
        f.write('<rotationXY x="0.5" y="0.5" xunits="fraction" yunits="fraction"/>\n')
        f.write('<size x="0" y="0" xunits="pixels" yunits="pixels"/>\n')
        f.write('<styleUrl>#KMLStyler</styleUrl>\n')
        f.write('<ExtendedData>\n')
        f.write('<SchemaData schemaUrl="#NewFeatureType">\n')
        f.write('<SimpleData name="Surface">Dimensions</SimpleData>\n')
        f.write('<SimpleData name="'+TOOLSNAME[0][0]+'">-</SimpleData>\n')
        for b in range(len(TOOLSNAME[1])):
            f.write('<SimpleData name="'+TOOLSNAME[1][b][0]+'">'+str(ToOls[b][0])+'</SimpleData>\n')
        f.write('</SchemaData>\n')
        f.write('</ExtendedData>\n')
        f.write('</ScreenOverlay>\n')
        if n == 0:
            f.write( '<name>North'+TOOLSNAME[0][0]+'1</name>\n')
        if n == 1:
            f.write( '<name>North'+TOOLSNAME[0][0]+'2</name>\n')
        hero = []
        I = range(len(s))
        for i in I:
            J = range(len(s[i]))
            for j in J:
                if i < max(I):
                    if j < (len(s[i+1])-1):
                        ## print 'flag1',(len(s[i+1])-1),j < (len(s[i+1])-1)
                        # quad of the raster cell (i,j) mirrored by F for this half:
                        if n == 0:
                            xx =[
                                [s[i][j][0]*F[1], s[i][j][1]*F[0], s[i][j][2]],
                                [s[i][j+1][0]*F[1], s[i][j+1][1]*F[0], s[i][j+1][2]],
                                [s[i+1][j+1][0]*F[1],s[i+1][j+1][1]*F[0], s[i+1][j+1][2]],
                                [s[i+1][j][0]*F[1], s[i+1][j][1]*F[0], s[i+1][j][2]],
                                [s[i][j][0]*F[1], s[i][j][1]*F[0], s[i][j][2]]
                            ]
                            ns = 'n'
                        if n == 1:
                            xx =[
                                [s[i][j][0]*F[1], s[i][j][1]*F[1], s[i][j][2]],
                                [s[i][j+1][0]*F[1], s[i][j+1][1]*F[1], s[i][j+1][2]],
                                [s[i+1][j+1][0]*F[1],s[i+1][j+1][1]*F[1], s[i+1][j+1][2]],
                                [s[i+1][j][0]*F[1], s[i+1][j][1]*F[1], s[i+1][j][2]],
                                [s[i][j][0]*F[1], s[i][j][1]*F[1], s[i][j][2]]
                            ]
                            ns = 'n'
                        f.write( "<Placemark>\n")
                        f.write( "<name>n="+str(n)+" i="+str(i)+" j="+str(j)+"</name>\n")
                        f.write( "<styleUrl>#m_ylw-pushpin</styleUrl>\n")
                        ##extended data
                        # convert the quad corners to WGS84 to find min/max height:
                        H = []
                        for h in range(len(xx)):
                            e = xx[h][2]
                            Utm = mdl.toUTM(NTE,NTN,STE,STN,ARP,SE,NE,xx[h][0],xx[h][1],xx[h][2],ns)
                            Wgs = list(mdl.U_W(Utm[0],Utm[1],zone, e))
                            H.append(Wgs[2])
                        Hn = min(H)
                        Hm = max(H)
                        f.write( "<ExtendedData>")
                        f.write( '<SchemaData schemaUrl="#S_t1_ISDDDDDDDDSSS">')
                        f.write( '<SimpleData name="Surface">'+TOOLSNAME[0][0]+'</SimpleData>')
                        f.write( '<SimpleData name="Z-min">'+str(Hn)+'</SimpleData>')
                        f.write( '<SimpleData name="Z-max">'+str(Hm)+'</SimpleData>')
                        f.write( '</SchemaData>')
                        f.write( "</ExtendedData>")
                        f.write( "<Polygon>\n")
                        f.write( "<altitudeMode>absolute</altitudeMode>\n")
                        f.write( "<outerBoundaryIs>\n")
                        f.write( "<LinearRing>\n")
                        f.write( "<coordinates>\n")
                        for h in range(len(xx)):
                            e = xx[h][2]
                            Utm = mdl.toUTM(NTE,NTN,STE,STN,ARP,SE,NE,xx[h][0],xx[h][1],xx[h][2],ns)
                            Wgs = list(mdl.U_W(Utm[0],Utm[1],zone, e))
                            H.append(Wgs[2])
                            f.write(str(Wgs[0])+","+str(Wgs[1])+","+str(Wgs[2]))
                            f.write( "\n")
                        f.write( "</coordinates>\n")
                        f.write( "</LinearRing>\n")
                        f.write( "</outerBoundaryIs>\n")
                        f.write( "</Polygon>\n")
                        f.write( "</Placemark>\n")
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '</Folder>\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
def SouthTO(SToOls, accur):
    """Sample the southern take-off climb surface on a grid and write it to an
    open KML file as two mirrored folders of Placemark polygons.

    NOTE(review): indentation of this function was lost in the source dump;
    the block structure below is reconstructed from the parallel "North"
    version immediately above it — confirm against the original file.

    Relies on module globals: f (open KML file), mdl (geodesy helpers), math,
    SE, NE, STOTurn15d, STOAlt, SMTOW22700kg, SIns, SCLWY, NTE, NTN, STE, STN,
    ARP, zone, CN, CL, NRunwayInfo, TOOLSNAME.

    SToOls -- take-off OLS parameter table (rows of 1-element lists:
              [0] inner-edge width, [1] offset/clearway, [2] divergence,
              [3] outer-edge width, [4] length, [5] slope)
              # assumed from the indexing below — TODO confirm
    accur  -- grid step used to discretise the surface
    """
    ToOls = SToOls
    Div = ToOls[2][0]      # lateral divergence per unit length
    Slope = ToOls[5][0]    # climb surface slope
    s = []                 # grid rows, one list of [D, L, H] points per i
    Square = []            # NOTE(review): never used below
    Elev = SE              # southern threshold elevation
    ToOls = SToOls
    TOTurn15d = STOTurn15d
    TOAlt = STOAlt
    TOL = ToOls[4][0]      # take-off surface length
    MTOW22700kg = SMTOW22700kg
    Ins = SIns
    ToOls[1][0] = SCLWY    # NOTE(review): mutates the caller's table in place
    # Annex-14-style inner/outer edge selection by aircraft mass / altitude flags
    if MTOW22700kg == 'N' and TOAlt == 'N':
        innEdge = mdl.F_M(150, 1)
    if MTOW22700kg == 'Y' and TOAlt == 'N':
        innEdge = mdl.F_M(250, 1)
    if TOAlt == 'N':
        if TOTurn15d == 'N':
            if TOL*Slope + innEdge/2 < mdl.F_M(1000, 1):
                outEdge = TOL*Slope + innEdge/2
            elif TOL*Slope + innEdge/2 >= mdl.F_M(1000, 1):
                outEdge = mdl.F_M(1000, 1)
        if TOTurn15d == 'Y':
            print ('Stop - another method is required to determine take-off area')
    if TOAlt == 'Y':
        if Ins == 'Y' or TOTurn15d == 'Y':
            outEdge = mdl.F_M(900, 1)
        else:
            outEdge = mdl.F_M(600, 1)
        innEdge = mdl.F_M(90, 1)
    # NOTE(review): the two lines below override everything computed above,
    # making the branch logic dead — the table values win. Confirm intended.
    innEdge = ToOls[0][0]
    outEdge = ToOls[3][0]
    # grid resolution along (J) and across (I) the surface
    J = range(1+int(math.ceil(TOL/mdl.iN(accur))))
    I = range(1+int(math.ceil((outEdge/2)/mdl.iS(accur))))
    for i in I:
        K = []   # points of this across-track row
        T = []   # j indices where the half-width collapsed to zero
        for j in J:
            # D1: distance at which divergence reaches the outer edge width
            D1 = ((outEdge-innEdge)/2)/Div + ToOls[1][0]
            D10 = D1 - accur
            D11 = D1 + accur
            D = (TOL+ToOls[1][0]) - j*accur     # distance from threshold
            Dm1 = (TOL+ToOls[1][0]) - (j-1)*accur   # NOTE(review): unused
            Dp1 = (TOL+ToOls[1][0]) - (j+1)*accur   # NOTE(review): unused
            H = Slope * (D-ToOls[1][0]) + Elev      # surface height at D
            L = (innEdge/2)+(Div*(D-ToOls[1][0])) - i*accur   # half width at D
            L1 = (innEdge/2)+(Div*(D1-ToOls[1][0])) - i*accur
            H1 = Slope * (D1-ToOls[1][0]) + Elev
            if L > 0 and outEdge/2 - i*accur > 0:
                ## area 1: beyond the divergence break point
                if D > D11:
                    if L >= outEdge/2 - i*accur:
                        L = outEdge/2 - i*accur   # clamp to outer edge
                    K.append([D, L, H])
                ## area 2: just past the break point — insert the break vertex
                if D <= D11 and D > D1:
                    if L >= outEdge/2 - i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D, L, H])
                    K.append([D1, L1, H1])
                ## area 3: just before the break point
                if D <= D1 and D > D10:
                    if L >= outEdge/2 - i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D1, L1, H1])
                    K.append([D, L, H])
                ## area 4: diverging section before the break
                if D <= D10 and D > ToOls[1][0]:
                    if L >= outEdge/2 - i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D, L, H])
                ## area 5: inner edge / clearway end
                if D <= ToOls[1][0]:
                    if L >= outEdge/2 - i*accur:
                        L = outEdge/2 - i*accur
                    K.append([D, L, H])
                    K.append([ToOls[1][0], innEdge/2, Elev])
            if L <= 0 or outEdge/2 - i*accur <= 0:
                # half width exhausted: pin the point onto the centreline
                L = 0
                L1 = 0
                if D > D11:
                    K.append([D, L, H])
                ## area 2
                if D <= D11 and D > D1:
                    K.append([D, L, H])
                    K.append([D1, L1, H1])
                ## area 3
                if D <= D1 and D > D10:
                    K.append([D1, L1, H1])
                    K.append([D, L, H])
                ## area 4
                if D <= D10 and D > ToOls[1][0]:
                    K.append([D, L, H])
                ## area 5
                if D <= ToOls[1][0]:
                    K.append([D, L, H])
                    K.append([ToOls[1][0], innEdge/2, Elev])
            if L == 0:
                T.append(j)
        s.append(K)
        if len(T) > 0:
            # shrink the along-track range once the width has collapsed
            J = range(T[0]+1)
    # Two passes: F mirrors the lateral coordinate for the two half surfaces
    F = [1, -1]
    for n in range(2):
        # folder (blank writes preserved from the original layout)
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '<Folder>\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '<ScreenOverlay>\n')
        f.write( '<name>Runway: Code '+str(int(CN))+CL+NRunwayInfo+'</name>\n')
        f.write( '<visibility>0</visibility>\n')
        f.write('<overlayXY x="0" y="0" xunits="fraction" yunits="fraction"/>\n')
        f.write('<screenXY x="25" y="95" xunits="pixels" yunits="pixels"/>\n')
        f.write('<rotationXY x="0.5" y="0.5" xunits="fraction" yunits="fraction"/>\n')
        f.write('<size x="0" y="0" xunits="pixels" yunits="pixels"/>\n')
        f.write('<styleUrl>#KMLStyler</styleUrl>\n')
        f.write('<ExtendedData>\n')
        f.write('<SchemaData schemaUrl="#NewFeatureType">\n')
        f.write('<SimpleData name="Surface">Dimensions</SimpleData>\n')
        f.write('<SimpleData name="'+TOOLSNAME[0][0]+'">-</SimpleData>\n')
        # dump the parameter table used to build this surface
        for b in range(len(TOOLSNAME[1])):
            f.write('<SimpleData name="'+TOOLSNAME[1][b][0]+'">'+str(ToOls[b][0])+'</SimpleData>\n')
        f.write('</SchemaData>\n')
        f.write('</ExtendedData>\n')
        f.write('</ScreenOverlay>\n')
        if n == 0:
            f.write( '<name>South'+TOOLSNAME[0][0]+'1</name>\n')
        if n == 1:
            f.write( '<name>South'+TOOLSNAME[0][0]+'2</name>\n')
        hero = []   # NOTE(review): never used
        I = range(len(s))
        for i in I:
            J = range(len(s[i]))
            for j in J:
                if i < max(I):
                    if j < (len(s[i+1])-1):
                        ## print 'flag1',(len(s[i+1])-1),j < (len(s[i+1])-1)
                        # quad between grid rows i and i+1, columns j and j+1;
                        # F[0]/F[1] select the mirrored lateral sign per pass
                        if n == 0:
                            xx = [
                                [s[i][j][0]*F[1], s[i][j][1]*F[0], s[i][j][2]],
                                [s[i][j+1][0]*F[1], s[i][j+1][1]*F[0], s[i][j+1][2]],
                                [s[i+1][j+1][0]*F[1], s[i+1][j+1][1]*F[0], s[i+1][j+1][2]],
                                [s[i+1][j][0]*F[1], s[i+1][j][1]*F[0], s[i+1][j][2]],
                                [s[i][j][0]*F[1], s[i][j][1]*F[0], s[i][j][2]]
                            ]
                            ns = 's'
                        if n == 1:
                            xx = [
                                [s[i][j][0]*F[1], s[i][j][1]*F[1], s[i][j][2]],
                                [s[i][j+1][0]*F[1], s[i][j+1][1]*F[1], s[i][j+1][2]],
                                [s[i+1][j+1][0]*F[1], s[i+1][j+1][1]*F[1], s[i+1][j+1][2]],
                                [s[i+1][j][0]*F[1], s[i+1][j][1]*F[1], s[i+1][j][2]],
                                [s[i][j][0]*F[1], s[i][j][1]*F[1], s[i][j][2]]
                            ]
                            ns = 's'
                        f.write( "<Placemark>\n")
                        f.write( "<name>n="+str(n)+" i="+str(i)+" j="+str(j)+"</name>\n")
                        f.write( "<styleUrl>#m_ylw-pushpin</styleUrl>\n")
                        ## extended data: min/max WGS84 heights of this quad
                        H = []
                        for h in range(len(xx)):
                            e = xx[h][2]
                            Utm = mdl.toUTM(NTE, NTN, STE, STN, ARP, SE, NE, xx[h][0], xx[h][1], xx[h][2], ns)
                            Wgs = list(mdl.U_W(Utm[0], Utm[1], zone, e))
                            H.append(Wgs[2])
                        Hn = min(H)
                        Hm = max(H)
                        f.write( "<ExtendedData>")
                        f.write( '<SchemaData schemaUrl="#S_t1_ISDDDDDDDDSSS">')
                        f.write( '<SimpleData name="Surface">'+TOOLSNAME[0][0]+'</SimpleData>')
                        f.write( '<SimpleData name="Z-min">'+str(Hn)+'</SimpleData>')
                        f.write( '<SimpleData name="Z-max">'+str(Hm)+'</SimpleData>')
                        f.write( '</SchemaData>')
                        f.write( "</ExtendedData>")
                        f.write( "<Polygon>\n")
                        f.write( "<altitudeMode>absolute</altitudeMode>\n")
                        f.write( "<outerBoundaryIs>\n")
                        f.write( "<LinearRing>\n")
                        f.write( "<coordinates>\n")
                        # second conversion pass emits lon,lat,alt vertices
                        # (appending to H again is harmless; Hn/Hm already set)
                        for h in range(len(xx)):
                            e = xx[h][2]
                            Utm = mdl.toUTM(NTE, NTN, STE, STN, ARP, SE, NE, xx[h][0], xx[h][1], xx[h][2], ns)
                            Wgs = list(mdl.U_W(Utm[0], Utm[1], zone, e))
                            H.append(Wgs[2])
                            f.write(str(Wgs[0])+","+str(Wgs[1])+","+str(Wgs[2]))
                            f.write( "\n")
                        f.write( "</coordinates>\n")
                        f.write( "</LinearRing>\n")
                        f.write( "</outerBoundaryIs>\n")
                        f.write( "</Polygon>\n")
                        f.write( "</Placemark>\n")
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '</Folder>\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
        f.write( '\n')
| 35.207192
| 100
| 0.372599
| 2,665
| 20,561
| 2.863415
| 0.071295
| 0.11165
| 0.103656
| 0.060805
| 0.869611
| 0.864107
| 0.863714
| 0.860962
| 0.843664
| 0.843664
| 0
| 0.060927
| 0.439619
| 20,561
| 583
| 101
| 35.267581
| 0.601371
| 0.019552
| 0
| 0.844538
| 0
| 0.004202
| 0.122047
| 0.02795
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004202
| false
| 0
| 0.010504
| 0
| 0.014706
| 0.004202
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9ffeea6c6291597084da9ee158dc0dd38f349e0c
| 21,480
|
py
|
Python
|
drl_implementation/agent/utils/replay_buffer.py
|
IanYangChina/DRL_Implementation
|
6dd0a94e4c3adbe16265d3b3780efc2b5e8d7047
|
[
"MIT"
] | 11
|
2019-11-29T23:36:32.000Z
|
2021-07-21T08:40:27.000Z
|
drl_implementation/agent/utils/replay_buffer.py
|
IanYangChina/DRL_Implementation
|
6dd0a94e4c3adbe16265d3b3780efc2b5e8d7047
|
[
"MIT"
] | 2
|
2021-06-12T14:18:14.000Z
|
2021-10-05T09:41:00.000Z
|
drl_implementation/agent/utils/replay_buffer.py
|
IanYangChina/DRL_Implementation
|
6dd0a94e4c3adbe16265d3b3780efc2b5e8d7047
|
[
"MIT"
] | 4
|
2021-01-05T21:54:14.000Z
|
2021-10-05T05:15:35.000Z
|
import random as R
import numpy as np
from .segment_tree import SumSegmentTree, MinSegmentTree
from collections import namedtuple
class ReplayBuffer(object):
    """Fixed-capacity transition store with uniform random sampling.

    Transitions are kept as instances of ``tr_namedtuple``; once ``capacity``
    is reached, the oldest slot is overwritten (circular buffer).
    """

    def __init__(self, capacity, tr_namedtuple, seed=0, saving_path=None):
        # NOTE: seeds the *global* random module, matching the original design
        R.seed(seed)
        self.saving_path = saving_path
        self.capacity = capacity
        self.memory = []
        self.position = 0  # next slot to (over)write
        self.Transition = tr_namedtuple

    def store_experience(self, *args):
        """Store one transition built from ``*args``, evicting the oldest when full."""
        slot = self.position
        if len(self.memory) < self.capacity:
            self.memory.append(None)  # grow the list until capacity is reached
        self.memory[slot] = self.Transition(*args)
        self.position = (slot + 1) % self.capacity

    def sample(self, batch_size):
        """Return a uniformly sampled batch as a transposed Transition of tuples."""
        drawn = R.sample(self.memory, batch_size)
        return self.Transition(*zip(*drawn))

    def save_as_npy(self, start=None, end=None):
        """Dump (a slice of) memory to ``saving_path`` as one .npy per field."""
        assert self.saving_path is not None
        if start is None:
            batch = self.Transition(*zip(*self.memory))
        else:
            assert end is not None
            batch = self.Transition(*zip(*self.memory[start:end]))
        for field in ('state', 'action', 'next_state', 'reward', 'done'):
            np.save(self.saving_path + '/' + field, np.array(getattr(batch, field)))

    def load_from_npy(self):
        """Re-populate memory from the .npy files written by ``save_as_npy``."""
        assert self.saving_path is not None
        arrays = [np.load(self.saving_path + '/' + field + '.npy')
                  for field in ('state', 'action', 'next_state', 'reward', 'done')]
        # iterate the leading axis of every array in lockstep
        for row in zip(*arrays):
            self.store_experience(*row)

    def clear_memory(self):
        """Drop every stored transition and rewind the write position."""
        self.memory.clear()
        self.position = 0

    @property
    def full_memory(self):
        """Entire memory, transposed into a single Transition of tuples."""
        return self.Transition(*zip(*self.memory))

    def __len__(self):
        return len(self.memory)
class EpisodeWiseReplayBuffer(object):
    """Buffers transitions grouped by episode before committing them to memory.

    The caller toggles ``new_episode`` to True to open a fresh episode list;
    ``store_episodes`` flushes every buffered episode into the circular memory.
    """

    def __init__(self, capacity, tr_namedtuple, seed=0):
        R.seed(seed)  # seeds the global random module, as the original did
        self.capacity = capacity
        self.memory = []
        self.position = 0           # next memory slot to (over)write
        self.new_episode = False    # set True by the caller to start an episode
        self.episodes = []          # pending, not-yet-committed episodes
        self.ep_position = -1       # index of the episode currently being filled
        self.Transition = tr_namedtuple

    def store_experience(self, *args):
        """Append a transition to the current pending episode."""
        # $new_episode is a boolean value managed by the caller
        if self.new_episode:
            self.episodes.append([])
            self.ep_position += 1
        self.episodes[self.ep_position].append(self.Transition(*args))

    def store_episodes(self):
        """Flush all pending episodes into memory, then reset the staging area."""
        if not self.episodes:
            return
        for episode in self.episodes:
            for transition in episode:
                slot = self.position
                if len(self.memory) < self.capacity:
                    self.memory.append(None)
                self.memory[slot] = transition
                self.position = (slot + 1) % self.capacity
        self.episodes.clear()
        self.ep_position = -1

    def sample(self, batch_size):
        """Uniformly sample a batch and return it transposed as a Transition."""
        drawn = R.sample(self.memory, batch_size)
        return self.Transition(*zip(*drawn))

    def __len__(self):
        return len(self.memory)
class HindsightReplayBuffer(EpisodeWiseReplayBuffer):
    """Episode-wise buffer with Hindsight Experience Replay (HER) relabelling.

    ``modify_episodes`` appends relabelled copies of each pending episode in
    which the desired goal is replaced by an achieved goal sampled according
    to ``sampling_strategy`` ('final', 'episode' or 'future').
    """

    def __init__(self, capacity, tr_namedtuple, store_goal_ind=False,
                 sampling_strategy='future', sampled_goal_num=6, terminate_on_achieve=False,
                 goal_distance_threshold=0.05,
                 seed=0):
        # strategy for choosing substitute goals (per the HER paper)
        self.sampling_strategy = sampling_strategy
        assert self.sampling_strategy in ['final', 'episode', 'future']
        self.k = sampled_goal_num  # number of substitute goals per episode/transition
        self.terminate_on_achieve = terminate_on_achieve
        self.goal_distance_threshold = goal_distance_threshold
        # when True, transitions are expected to carry a `goal_ind` field
        self.store_goal_ind = store_goal_ind
        EpisodeWiseReplayBuffer.__init__(self, capacity, tr_namedtuple, seed)

    def modify_episodes(self):
        """Append goal-relabelled episodes to ``self.episodes``.

        The loop bound is fixed at the pre-call episode count, so episodes
        appended here are not themselves reprocessed.
        """
        if len(self.episodes) == 0:
            return
        if self.sampling_strategy != 'future':
            # 'episode' or 'final' strategy
            for _ in range(len(self.episodes)):
                ep = self.episodes[_]
                if len(ep) < self.k:
                    continue  # too short to sample k goals from
                imagined_goals = self.sample_achieved_goal(ep)
                for n in range(len(imagined_goals[0])):
                    ind = imagined_goals[0][n]     # step at which the goal was achieved
                    goal = imagined_goals[1][n]    # the substitute desired goal
                    modified_ep = []
                    # relabel every transition up to (and including) that step
                    for tr in range(ind + 1):
                        s = ep[tr].state
                        dg = goal
                        a = ep[tr].action
                        ns = ep[tr].next_state
                        ag = ep[tr].achieved_goal
                        # recompute the sparse reward against the new goal
                        r = goal_distance_reward(dg, ag, self.goal_distance_threshold)
                        if self.terminate_on_achieve:
                            # reward 0.0 means "achieved" under the sparse scheme
                            d = 0 if r == 0.0 else 1
                        else:
                            d = ep[tr].done
                        if not self.store_goal_ind:
                            modified_ep.append(self.Transition(s, dg, a, ns, ag, r, d))
                        else:
                            # requires the namedtuple to expose a `goal_ind` field
                            modified_ep.append(self.Transition(s, dg, a, ns, ag, r, d, ep[tr].goal_ind))
                    self.episodes.append(modified_ep)
        else:
            for _ in range(len(self.episodes)):
                # 'future' strategy
                # for each transition, sample k achieved goals after that transition to replace the desired goal
                ep = self.episodes[_]
                if len(ep) < self.k:
                    continue
                # NOTE(review): the final self.k transitions are never
                # relabelled (range stops at len(ep) - self.k) — confirm intended
                for tr_ind in range(len(ep) - self.k):
                    # uses the global `random` module (seeded in __init__)
                    future_inds = R.sample(np.arange(tr_ind + 1, len(ep), dtype="int").tolist(), self.k)
                    modified_ep = []
                    for ind in future_inds:
                        s = ep[tr_ind].state
                        dg = ep[ind].achieved_goal  # goal achieved later in the episode
                        a = ep[tr_ind].action
                        ns = ep[tr_ind].next_state
                        ag = ep[tr_ind].achieved_goal
                        r = goal_distance_reward(dg, ag, self.goal_distance_threshold)
                        if self.terminate_on_achieve:
                            d = 0 if r == 0.0 else 1
                        else:
                            d = ep[tr_ind].done
                        if not self.store_goal_ind:
                            modified_ep.append(self.Transition(s, dg, a, ns, ag, r, d))
                        else:
                            modified_ep.append(self.Transition(s, dg, a, ns, ag, r, d, ep[tr_ind].goal_ind))
                    self.episodes.append(modified_ep)

    def sample_achieved_goal(self, ep):
        """Return ``[indices, goals]`` for the 'episode' and 'final' strategies.

        Returns empty lists for the 'future' strategy (which never calls this).
        """
        goals = [[], []]
        if self.sampling_strategy == 'episode':
            goals[0] = R.sample(np.arange(0, len(ep), dtype="int").tolist(), self.k)
            for ind in goals[0]:
                goals[1].append(ep[ind].achieved_goal)
        elif self.sampling_strategy == 'final':
            # single goal: the one achieved at the last step of the episode
            goals[0].append(len(ep) - 1)
            goals[1].append(ep[-1].achieved_goal)
        return goals
class PrioritisedReplayBuffer(object):
    """Proportional prioritised experience replay (PER, Schaul et al. 2016).

    Priorities live in sum/min segment trees; a transition is sampled with
    probability proportional to ``priority ** alpha``, and importance-sampling
    weights use exponent ``-beta``, normalised by the maximum weight in memory.
    """

    def __init__(self, capacity, tr_namedtuple, alpha=0.5, beta=0.8, epsilon=1e-6, rng=None, saving_path=None):
        self.saving_path = saving_path
        if rng is None:
            self.rng = np.random.default_rng(seed=0)
        else:
            self.rng = rng
        self.capacity = capacity
        self.memory = []
        self.mem_position = 0  # next slot to (over)write
        self.Transition = tr_namedtuple
        self.alpha = alpha      # priority exponent
        self.beta = beta        # default importance-sampling exponent
        self.epsilon = epsilon  # keeps updated priorities strictly positive
        # segment trees require a power-of-two capacity
        tree_capacity = 1
        while tree_capacity < capacity:
            tree_capacity *= 2
        self.sum_tree = SumSegmentTree(tree_capacity)
        self.min_tree = MinSegmentTree(tree_capacity)
        self._max_priority = 1.0  # new transitions get the max priority seen so far

    def store_experience(self, *args):
        """Store a transition with maximal priority so it is sampled soon."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.mem_position] = self.Transition(*args)
        self.sum_tree[self.mem_position] = self._max_priority ** self.alpha
        self.min_tree[self.mem_position] = self._max_priority ** self.alpha
        self.mem_position = (self.mem_position + 1) % self.capacity

    def store_experience_with_given_priority(self, priority, *args):
        """Store a transition with an explicit priority (epsilon-shifted)."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.mem_position] = self.Transition(*args)
        self.sum_tree[self.mem_position] = (priority + self.epsilon) ** self.alpha
        self.min_tree[self.mem_position] = (priority + self.epsilon) ** self.alpha
        self.mem_position = (self.mem_position + 1) % self.capacity

    def sample(self, batch_size, beta=None):
        """Sample a prioritised batch.

        Returns (transposed Transition batch, normalised IS weights, indices).
        """
        if beta is None:
            beta = self.beta
        assert beta > 0, "beta should be greater than 0"
        inds, priority_sum = self.sample_proportion(batch_size)
        batch = []
        weights = []
        minimal_priority = self.min_tree.min()
        # weight of the lowest-priority transition == the largest IS weight
        max_weight = (minimal_priority / priority_sum * len(self)) ** (-beta)
        for ind in inds:
            batch.append(self.memory[ind])
            sample_priority = self.sum_tree[ind] / priority_sum
            weight = (sample_priority * len(self)) ** (-beta)
            weight = weight / max_weight  # normalise so weights are <= 1
            weights.append(weight)
        return self.Transition(*zip(*batch)), np.array(weights), inds

    def sample_proportion(self, batch_size):
        """Stratified proportional sampling: one draw per equal priority stratum."""
        inds = []
        # NOTE(review): whether len(self)-1 excludes the newest transition
        # depends on the SegmentTree end-exclusivity convention — confirm
        # against segment_tree.py
        priority_sum = self.sum_tree.sum(0, len(self) - 1)
        interval = priority_sum / batch_size
        for i in range(batch_size):
            mass = self.rng.uniform() * interval + i * interval
            ind = self.sum_tree.find_prefixsum_idx(mass)
            inds.append(ind)
        return inds, priority_sum

    def update_priority(self, inds, priorities):
        """Set new priorities (e.g. TD errors) for the given indices."""
        for ind, priority in zip(inds, priorities):
            assert priority >= 0
            assert 0 <= ind < len(self)
            self.sum_tree[ind] = (priority + self.epsilon) ** self.alpha
            self.min_tree[ind] = (priority + self.epsilon) ** self.alpha
            self._max_priority = max(self._max_priority, priority)

    def save_as_npy(self, start=None, end=None):
        """Dump (a slice of) memory to ``saving_path`` as one .npy per field."""
        assert self.saving_path is not None
        if start is None:
            batch = self.Transition(*zip(*self.memory))
        else:
            assert end is not None
            batch = self.Transition(*zip(*self.memory[start:end]))
        np.save(self.saving_path + '/state', np.array(batch.state))
        np.save(self.saving_path + '/action', np.array(batch.action))
        np.save(self.saving_path + '/next_state', np.array(batch.next_state))
        np.save(self.saving_path + '/reward', np.array(batch.reward))
        np.save(self.saving_path + '/done', np.array(batch.done))

    def load_from_npy(self):
        """Re-populate memory (with max priority) from saved .npy files."""
        assert self.saving_path is not None
        state = np.load(self.saving_path + '/state.npy')
        action = np.load(self.saving_path + '/action.npy')
        next_state = np.load(self.saving_path + '/next_state.npy')
        reward = np.load(self.saving_path + '/reward.npy')
        done = np.load(self.saving_path + '/done.npy')
        for i in range(state.shape[0]):
            self.store_experience(state[i],
                                  action[i],
                                  next_state[i],
                                  reward[i],
                                  done[i])

    def clear_memory(self):
        """Drop all transitions and rewind the write position.

        NOTE(review): the segment trees are not reset here — stale priorities
        remain until overwritten; confirm this is acceptable to callers.
        """
        self.memory.clear()
        self.mem_position = 0

    @property
    def full_memory(self):
        """Entire memory, transposed into a single Transition of tuples."""
        return self.Transition(*zip(*self.memory))

    def __len__(self):
        return len(self.memory)
class PrioritisedEpisodeWiseReplayBuffer(object):
    """Episode-staged variant of proportional prioritised replay.

    Transitions are buffered per episode (see ``store_experience`` /
    ``store_episodes``) and receive the maximum priority when committed.
    """

    def __init__(self, capacity, tr_namedtuple, alpha=0.5, beta=0.8, epsilon=1e-6, rng=None):
        if rng is None:
            self.rng = np.random.default_rng(seed=0)
        else:
            self.rng = rng
        self.capacity = capacity
        self.memory = []
        self.mem_position = 0       # next memory slot to (over)write
        self.new_episode = False    # set True by the caller to start an episode
        self.episodes = []          # pending, not-yet-committed episodes
        self.ep_position = -1       # index of the episode currently being filled
        self.Transition = tr_namedtuple
        self.alpha = alpha          # priority exponent
        self.beta = beta            # default importance-sampling exponent
        self.epsilon = epsilon      # keeps updated priorities strictly positive
        # segment trees require a power-of-two capacity
        tree_capacity = 1
        while tree_capacity < capacity:
            tree_capacity *= 2
        self.sum_tree = SumSegmentTree(tree_capacity)
        self.min_tree = MinSegmentTree(tree_capacity)
        self._max_priority = 1.0

    def store_experience(self, *args):
        """Append a transition to the current pending episode."""
        if self.new_episode:
            self.episodes.append([])
            self.ep_position += 1
        self.episodes[self.ep_position].append(self.Transition(*args))

    def store_episodes(self):
        """Commit all pending episodes to memory with maximal priority."""
        if len(self.episodes) == 0:
            return
        for ep in self.episodes:
            for n in range(len(ep)):
                if len(self.memory) < self.capacity:
                    self.memory.append(None)
                self.memory[self.mem_position] = ep[n]
                self.sum_tree[self.mem_position] = self._max_priority ** self.alpha
                self.min_tree[self.mem_position] = self._max_priority ** self.alpha
                self.mem_position = (self.mem_position + 1) % self.capacity
        self.episodes.clear()
        self.ep_position = -1

    def sample(self, batch_size, beta=None):
        """Sample a prioritised batch; returns (batch, IS weights, indices)."""
        if beta is None:
            beta = self.beta
        assert beta > 0, "beta should be greater than 0"
        inds, priority_sum = self.sample_proportion(batch_size)
        batch = []
        weights = []
        minimal_priority = self.min_tree.min()
        # largest possible IS weight, used for normalisation
        max_weight = (minimal_priority / priority_sum * len(self)) ** (-beta)
        for ind in inds:
            batch.append(self.memory[ind])
            sample_priority = self.sum_tree[ind] / priority_sum
            weight = (sample_priority * len(self)) ** (-beta)
            weight = weight / max_weight
            weights.append(weight)
        return self.Transition(*zip(*batch)), np.array(weights), inds

    def sample_proportion(self, batch_size):
        """Stratified proportional sampling over the priority mass."""
        inds = []
        # NOTE(review): end-index convention depends on the SegmentTree
        # implementation — confirm against segment_tree.py
        priority_sum = self.sum_tree.sum(0, len(self) - 1)
        interval = priority_sum / batch_size
        for i in range(batch_size):
            mass = self.rng.uniform() * interval + i * interval
            ind = self.sum_tree.find_prefixsum_idx(mass)
            inds.append(ind)
        return inds, priority_sum

    def update_priority(self, inds, priorities):
        """Set new priorities (e.g. TD errors) for the given indices."""
        for ind, priority in zip(inds, priorities):
            assert priority >= 0
            assert 0 <= ind < len(self)
            self.sum_tree[ind] = (priority + self.epsilon) ** self.alpha
            self.min_tree[ind] = (priority + self.epsilon) ** self.alpha
            self._max_priority = max(self._max_priority, priority)

    def __len__(self):
        return len(self.memory)
class PrioritisedHindsightReplayBuffer(PrioritisedEpisodeWiseReplayBuffer):
    """Prioritised episode-wise buffer with HER goal relabelling.

    Mirrors ``HindsightReplayBuffer.modify_episodes`` on top of prioritised
    storage.
    """

    def __init__(self, capacity, tr_namedtuple, alpha=0.5, beta=0.8, store_goal_ind=False,
                 sampling_strategy='future', sampled_goal_num=4, terminate_on_achieve=False,
                 goal_distance_threshold=0.05,
                 rng=None):
        self.sampling_strategy = sampling_strategy
        assert self.sampling_strategy in ['final', 'episode', 'future']
        self.k = sampled_goal_num  # substitute goals per episode/transition
        self.terminate_on_achieve = terminate_on_achieve
        self.goal_distance_threshold = goal_distance_threshold
        # NOTE(review): `store_goal_ind` is accepted but never stored or used —
        # unlike HindsightReplayBuffer, relabelled transitions here always have
        # 7 fields. Confirm whether goal_ind support was intended.
        PrioritisedEpisodeWiseReplayBuffer.__init__(self, capacity, tr_namedtuple, alpha=alpha, beta=beta, rng=rng)

    def modify_episodes(self):
        """Append goal-relabelled copies of each pending episode.

        The loop bound is fixed at the pre-call episode count, so episodes
        appended here are not themselves reprocessed.
        """
        if len(self.episodes) == 0:
            return
        if self.sampling_strategy != 'future':
            for _ in range(len(self.episodes)):
                # 'episode' or 'final' strategy
                ep = self.episodes[_]
                if len(ep) < self.k:
                    continue  # too short to sample k goals from
                imagined_goals = self.sample_achieved_goal(ep)
                for n in range(len(imagined_goals[0])):
                    ind = imagined_goals[0][n]    # step at which the goal was achieved
                    goal = imagined_goals[1][n]   # the substitute desired goal
                    modified_ep = []
                    for tr in range(ind + 1):
                        s = ep[tr].state
                        dg = goal
                        a = ep[tr].action
                        ns = ep[tr].next_state
                        ag = ep[tr].achieved_goal
                        # recompute the sparse reward against the new goal
                        r = goal_distance_reward(dg, ag, self.goal_distance_threshold)
                        if self.terminate_on_achieve:
                            d = 0 if r == 0.0 else 1
                        else:
                            d = ep[tr].done
                        modified_ep.append(self.Transition(s, dg, a, ns, ag, r, d))
                    self.episodes.append(modified_ep)
        else:
            for _ in range(len(self.episodes)):
                # 'future' strategy
                # for each transition, sample k achieved goals after that transition to replace the desired goal
                ep = self.episodes[_]
                if len(ep) < self.k:
                    continue
                # NOTE(review): the final self.k transitions are never
                # relabelled; uses the global `random` module while the rest of
                # the class uses self.rng — confirm both are intended
                for tr_ind in range(len(ep) - self.k):
                    future_inds = R.sample(np.arange(tr_ind + 1, len(ep), dtype="int").tolist(), self.k)
                    modified_ep = []
                    for ind in future_inds:
                        s = ep[tr_ind].state
                        dg = ep[ind].achieved_goal  # goal achieved later in the episode
                        a = ep[tr_ind].action
                        ns = ep[tr_ind].next_state
                        ag = ep[tr_ind].achieved_goal
                        r = goal_distance_reward(dg, ag, self.goal_distance_threshold)
                        if self.terminate_on_achieve:
                            d = 0 if r == 0.0 else 1
                        else:
                            d = ep[tr_ind].done
                        modified_ep.append(self.Transition(s, dg, a, ns, ag, r, d))
                    self.episodes.append(modified_ep)

    def sample_achieved_goal(self, ep):
        """Return ``[indices, goals]`` for the 'episode' and 'final' strategies."""
        goals = [[], []]
        if self.sampling_strategy == 'episode':
            goals[0] = R.sample(np.arange(0, len(ep), dtype="int").tolist(), self.k)
            for ind in goals[0]:
                goals[1].append(ep[ind].achieved_goal)
        elif self.sampling_strategy == 'final':
            # single goal: the one achieved at the last step of the episode
            goals[0].append(len(ep) - 1)
            goals[1].append(ep[-1].achieved_goal)
        return goals
def goal_distance_reward(goal_a, goal_b, distance_threshold=0.05):
    """Sparse reward for goal-conditioned environments.

    Returns 0.0 when the Euclidean distance between the goals is within
    ``distance_threshold`` and -1.0 otherwise (float32, last axis reduced).
    """
    assert goal_a.shape == goal_b.shape
    difference = goal_a - goal_b
    distance = np.linalg.norm(difference, axis=-1)
    too_far = distance > distance_threshold
    return -too_far.astype(np.float32)
def make_buffer(mem_capacity, transition_tuple=None, prioritised=False, seed=0, rng=None,
                # the last 4 args are only for goal-conditioned RL buffers
                goal_conditioned=False, store_goal_ind=False, sampling_strategy='future', num_sampled_goal=4, terminal_on_achieved=True,
                goal_distance_threshold=0.05):
    """Factory for the replay buffers defined in this module.

    mem_capacity      -- maximum number of stored transitions (cast to int)
    transition_tuple  -- optional namedtuple type; a suitable default is built
                         when None
    prioritised       -- select the prioritised (PER) variants
    goal_conditioned  -- select the hindsight (HER) variants; the remaining
                         keyword arguments configure goal relabelling
    Returns the constructed buffer instance.
    """
    t = namedtuple("transition", ('state', 'action', 'next_state', 'reward', 'done'))
    t_goal = namedtuple("transition",
                        ('state', 'desired_goal', 'action', 'next_state', 'achieved_goal', 'reward', 'done'))
    # BUGFIX: when store_goal_ind is requested, HindsightReplayBuffer's
    # modify_episodes reads `transition.goal_ind`, so the default tuple must
    # carry that extra field — the plain t_goal would raise AttributeError.
    t_goal_ind = namedtuple("transition",
                            ('state', 'desired_goal', 'action', 'next_state', 'achieved_goal', 'reward', 'done',
                             'goal_ind'))
    mem_capacity = int(mem_capacity)
    if not goal_conditioned:
        if transition_tuple is None:
            transition_tuple = t
        if not prioritised:
            buffer = ReplayBuffer(mem_capacity, transition_tuple, seed=seed)
        else:
            buffer = PrioritisedReplayBuffer(mem_capacity, transition_tuple, rng=rng)
    else:
        if transition_tuple is None:
            # pick the default tuple matching the goal_ind requirement
            transition_tuple = t_goal_ind if store_goal_ind else t_goal
        if not prioritised:
            buffer = HindsightReplayBuffer(mem_capacity, transition_tuple,
                                           store_goal_ind=store_goal_ind,
                                           sampling_strategy=sampling_strategy,
                                           sampled_goal_num=num_sampled_goal,
                                           terminate_on_achieve=terminal_on_achieved,
                                           seed=seed,
                                           goal_distance_threshold=goal_distance_threshold)
        else:
            buffer = PrioritisedHindsightReplayBuffer(mem_capacity,
                                                      transition_tuple,
                                                      store_goal_ind=store_goal_ind,
                                                      sampling_strategy=sampling_strategy,
                                                      sampled_goal_num=num_sampled_goal,
                                                      terminate_on_achieve=terminal_on_achieved,
                                                      rng=rng)
    return buffer
| 42.703777
| 136
| 0.555168
| 2,512
| 21,480
| 4.551752
| 0.071258
| 0.03061
| 0.031835
| 0.013993
| 0.875022
| 0.867151
| 0.843712
| 0.836278
| 0.816775
| 0.794822
| 0
| 0.009147
| 0.343436
| 21,480
| 502
| 137
| 42.788845
| 0.801603
| 0.022858
| 0
| 0.876712
| 0
| 0
| 0.0215
| 0
| 0
| 0
| 0
| 0
| 0.034247
| 1
| 0.089041
| false
| 0
| 0.009132
| 0.013699
| 0.157534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c67d0b9bdd746a6a4a0c229ebbe62945cc73ac85
| 18,537
|
py
|
Python
|
recommender/dimadb/models.py
|
cnam0203/trivi-backend
|
d6a4c6c600bdf22fd45c72c25c7ab55281339a0c
|
[
"MIT"
] | null | null | null |
recommender/dimadb/models.py
|
cnam0203/trivi-backend
|
d6a4c6c600bdf22fd45c72c25c7ab55281339a0c
|
[
"MIT"
] | null | null | null |
recommender/dimadb/models.py
|
cnam0203/trivi-backend
|
d6a4c6c600bdf22fd45c72c25c7ab55281339a0c
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# DB for Machine Learning Model
class LdaSimilarityVersion(models.Model):
    """One training run of the LDA similarity model (topic count, item scope)."""
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    n_topics = models.IntegerField(null=True)
    item_type = models.CharField(max_length=150, null=True, blank=True)
    n_products = models.IntegerField(null=True)

    def __str__(self):
        # display the version by its creation timestamp
        return format(self.created_at)
class LdaSimilarity(models.Model):
    """Pairwise LDA similarity score between two items, tagged with its model version."""
    source = models.CharField(max_length=150, null=True, blank=True)
    target = models.CharField(max_length=150, null=True, blank=True)
    item_type = models.CharField(max_length=150, null=True, blank=True)
    similarity = models.DecimalField(max_digits=10, decimal_places=7)
    version = models.CharField(max_length=150, null=True, blank=True)
# Import_info:
class ImportInfo(models.Model):
    """Audit record of a data import: target table and when it ran."""
    id = models.AutoField(primary_key=True)
    table_name = models.CharField(max_length=50, null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
# New_event:
class Events(models.Model):
    """An event item (show, session, …) with scheduling and recurrence fields."""
    id = models.AutoField(primary_key=True)
    event_id = models.CharField(max_length=150, unique=True)  # external identifier
    event_name = models.CharField(max_length=150, null=True, blank=True)
    event_title = models.CharField(max_length=150, null=True, blank=True)
    event_type = models.CharField(max_length=150, null=True, blank=True)
    event_price = models.DecimalField(
        max_digits=5, decimal_places=2, null=True, blank=True)
    slug = models.CharField(max_length=150, null=True, blank=True)
    lang = models.CharField(max_length=150, null=True, blank=True)
    img = models.CharField(max_length=150, null=True, blank=True)
    url = models.CharField(max_length=150, null=True, blank=True)
    start_date = models.DateTimeField(null=True)
    end_date = models.DateTimeField(null=True)
    next_date = models.DateTimeField(null=True)
    count_down = models.IntegerField(null=True)
    # NOTE(review): "freg" is likely a typo for "freq"; renaming would require
    # a migration, so it is documented rather than changed here
    recurring_freg = models.IntegerField(null=True)
    recurring_count = models.IntegerField(null=True)
    recurring_by_day = models.IntegerField(null=True)
    # stores the *strings* 'True'/'False'; the display labels in `choices`
    # are Python booleans — NOTE(review): probably meant ('True', 'True') etc.
    is_public = models.CharField(max_length=10, choices=(
        ('True', True), ('False', False)), default='True')
    status = models.CharField(max_length=30, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    created_by = models.CharField(max_length=150, null=True, blank=True)
    modified_at = models.DateTimeField(auto_now=True, null=True)
    modified_by = models.CharField(max_length=150, null=True, blank=True)
    group_id = models.CharField(max_length=30, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)  # links to ImportInfo
# New_event:
class Products(models.Model):
    """A product item with pricing, status and audit fields."""
    id = models.AutoField(primary_key=True)
    product_id = models.CharField(max_length=150, unique=True)  # external identifier
    product_name = models.CharField(max_length=150, null=True, blank=True)
    product_type = models.CharField(max_length=150, null=True, blank=True)
    product_price = models.DecimalField(
        max_digits=5, decimal_places=2, null=True, blank=True)
    product_revenue = models.DecimalField(
        max_digits=5, decimal_places=2, null=True, blank=True)
    price_type = models.CharField(max_length=50, null=True, blank=True)
    status = models.CharField(max_length=30, null=True, blank=True)
    slug = models.CharField(max_length=150, null=True, blank=True)
    img = models.CharField(max_length=150, null=True, blank=True)
    url = models.CharField(max_length=150, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    created_by = models.CharField(max_length=150, null=True, blank=True)
    modified_at = models.DateTimeField(auto_now=True, null=True)
    modified_by = models.CharField(max_length=150, null=True, blank=True)
    group_id = models.CharField(max_length=30, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)  # links to ImportInfo
# Location
class GeoLocation(models.Model):
    """A physical location/address; coordinates are stored as strings."""
    id = models.AutoField(primary_key=True)
    location_id = models.CharField(max_length=50, null=True, blank=True)
    location_name = models.CharField(max_length=50, null=True, blank=True)
    address = models.CharField(max_length=150, null=True, blank=True)
    address2 = models.CharField(max_length=150, null=True, blank=True)
    longitude = models.CharField(max_length=50, null=True, blank=True)
    latitude = models.CharField(max_length=50, null=True, blank=True)
    city = models.CharField(max_length=50, null=True, blank=True)
    state = models.CharField(max_length=50, null=True, blank=True)
    region = models.CharField(max_length=50, null=True, blank=True)
    zip = models.CharField(max_length=50, null=True, blank=True)
    country = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Resource
class Resource(models.Model):
    """An external resource (file, image, link) referenced by events/products."""
    id = models.AutoField(primary_key=True)
    resource_id = models.CharField(max_length=50, null=True, blank=True)
    resource_name = models.CharField(max_length=150, null=True, blank=True)
    resource_type = models.CharField(max_length=50, null=True, blank=True)
    resource_url = models.CharField(max_length=200)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# PriceType
class PriceType(models.Model):
    """A pricing category (name + currency)."""
    id = models.AutoField(primary_key=True)
    price_type_id = models.CharField(max_length=50, null=True, blank=True)
    price_type_name = models.CharField(max_length=50, null=True, blank=True)
    price_type_currency = models.CharField(
        max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# BusinessEntity
class BusinessEntity(models.Model):
    """An organisation/person participating in events or products."""
    id = models.AutoField(primary_key=True)
    entity_id = models.CharField(max_length=50, null=True, blank=True)
    entity_name = models.CharField(max_length=50, null=True, blank=True)
    slug = models.CharField(max_length=150, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    created_by = models.CharField(max_length=50, null=True, blank=True)
    modified_at = models.DateTimeField(auto_now=True, null=True)
    modified_by = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# EntityLocation
class EntityLocation(models.Model):
    """Join table: business entity <-> geo location (by string ids)."""
    id = models.AutoField(primary_key=True)
    entity_id = models.CharField(max_length=50, null=True, blank=True)
    location_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# EventLocation
class EventLocation(models.Model):
    """Join table: event <-> geo location, with an optional room label."""
    id = models.AutoField(primary_key=True)
    event_id = models.CharField(max_length=50, null=True, blank=True)
    location_id = models.CharField(max_length=50, null=True, blank=True)
    room = models.CharField(max_length=50, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# EntityEventRole
class EntityEventRole(models.Model):
    """Join table: entity's role (organiser, performer, …) in an event."""
    id = models.AutoField(primary_key=True)
    entity_id = models.CharField(max_length=50, null=True, blank=True)
    event_id = models.CharField(max_length=50, null=True, blank=True)
    role_name = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# EventResource
class EventResource(models.Model):
    """Join table: event <-> resource."""
    id = models.AutoField(primary_key=True)
    event_id = models.CharField(max_length=50, null=True, blank=True)
    resource_id = models.CharField(max_length=50, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# EntityResource
class EntityResource(models.Model):
    """Join table: business entity <-> resource."""
    id = models.AutoField(primary_key=True)
    entity_id = models.CharField(max_length=50, null=True, blank=True)
    resource_id = models.CharField(max_length=50, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# EventSimilarity
class EventSimilarity(models.Model):
id = models.AutoField(primary_key=True)
source_id = models.CharField(max_length=50, null=True, blank=True)
target_id = models.CharField(max_length=50, null=True, blank=True)
similarity = models.DecimalField(max_digits=5, decimal_places=2)
algo = models.CharField(max_length=50, null=True, blank=True)
import_id = models.CharField(max_length=30, null=True, blank=True)
# EntityProductRole
class EntityProductRole(models.Model):
    # Join table: the named role an entity plays for a product.
    id = models.AutoField(primary_key=True)
    entity_id = models.CharField(max_length=50, null=True, blank=True)
    product_id = models.CharField(max_length=50, null=True, blank=True)
    role_name = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# ProductResource
class ProductResource(models.Model):
    # Join table: resources attached to a product.
    id = models.AutoField(primary_key=True)
    product_id = models.CharField(max_length=50, null=True, blank=True)
    resource_id = models.CharField(max_length=50, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# ProductSimilarity
class ProductSimilarity(models.Model):
    # Pairwise similarity score between two products (mirrors
    # EventSimilarity).
    id = models.AutoField(primary_key=True)
    source_id = models.CharField(max_length=50, null=True, blank=True)
    target_id = models.CharField(max_length=50, null=True, blank=True)
    # Required (not nullable), unlike most fields in these models.
    similarity = models.DecimalField(max_digits=5, decimal_places=2)
    algo = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# EventProduct
class EventProduct(models.Model):
    # Join table: links an event to a product.
    id = models.AutoField(primary_key=True)
    event_id = models.CharField(max_length=50, null=True, blank=True)
    product_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Event Preference
class EventPreference(models.Model):
    # A typed, weighted preference signal for an event, optionally tied
    # to the activity that produced it.
    id = models.AutoField(primary_key=True)
    preference_id = models.CharField(max_length=50, null=True, blank=True)
    preference_type = models.CharField(max_length=50, null=True, blank=True)
    preference_value = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
    event_id = models.CharField(max_length=50, null=True, blank=True)
    activity_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Product Preferene
class ProductPreference(models.Model):
    # Same shape as EventPreference, but targeting a product.
    id = models.AutoField(primary_key=True)
    preference_id = models.CharField(max_length=50, null=True, blank=True)
    preference_type = models.CharField(max_length=50, null=True, blank=True)
    preference_value = models.DecimalField(max_digits=5, decimal_places=2, null=True, blank=True)
    product_id = models.CharField(max_length=50, null=True, blank=True)
    activity_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Session
class Session(models.Model):
    # One analytics visit/session: timing, device/browser fingerprint and
    # the page/event captured, keyed back to a customer.
    id = models.AutoField(primary_key=True)
    visit_id = models.CharField(max_length=50, null=True, blank=True)
    visit_date = models.DateField(null=True, blank=True)
    visit_start_time = models.DateTimeField(null=True, blank=True)
    visit_end_time = models.DateTimeField(null=True, blank=True)
    visit_number = models.CharField(max_length=50, null=True, blank=True)
    # Duration of the visit; unit not recorded here — presumably seconds,
    # TODO confirm against the import pipeline.
    visit_duration = models.IntegerField(null=True)
    operating_system = models.CharField(max_length=150, null=True, blank=True)
    device_category = models.CharField(max_length=150, null=True, blank=True)
    device_brand = models.CharField(max_length=150, null=True, blank=True)
    browser = models.CharField(max_length=150, null=True, blank=True)
    page_title = models.CharField(max_length=150, null=True, blank=True)
    page_location = models.CharField(max_length=150, null=True, blank=True)
    event_name = models.CharField(max_length=150, null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    customer_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
class SessionLocation(models.Model):
    # Join table: links a session to a location.
    id = models.AutoField(primary_key=True)
    session_id = models.CharField(max_length=50, null=True, blank=True)
    location_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Customer
class Customer(models.Model):
    # A customer record stitching together contact, profile and location
    # rows via string ids.
    id = models.AutoField(primary_key=True)
    customer_id = models.CharField(max_length=50, null=True, blank=True)
    ip_address = models.CharField(max_length=50, null=True, blank=True)
    contact_id = models.CharField(max_length=50, null=True, blank=True)
    profile_id = models.CharField(max_length=50, null=True, blank=True)
    location_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Profile
class ProfileCustomer(models.Model):
    # Demographic profile attached to a Customer via profile_id.
    id = models.AutoField(primary_key=True)
    profile_id = models.CharField(max_length=50, null=True, blank=True)
    first_name = models.CharField(max_length=50, null=True, blank=True)
    last_name = models.CharField(max_length=50, null=True, blank=True)
    age = models.IntegerField(null=True)
    # NOTE(review): default='event' is not one of the declared choices
    # ('male'/'female'); full_clean() rejects the default value. Verify
    # the intended default and fix.
    gender = models.CharField(max_length=10, choices=(
        ('male', 'male'), ('female', 'female')), default='event')
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Journey
class Journey(models.Model):
    # A customer journey; Interaction rows reference it via journey_id.
    id = models.AutoField(primary_key=True)
    journey_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Interaction
class Interaction(models.Model):
    # A single page/event hit inside a session and journey, with the
    # device context captured at that moment.
    id = models.AutoField(primary_key=True)
    interaction_id = models.CharField(max_length=50, null=True, blank=True)
    session_id = models.CharField(max_length=50, null=True, blank=True)
    journey_id = models.CharField(max_length=50, null=True, blank=True)
    customer_id = models.CharField(max_length=50, null=True, blank=True)
    visit_date = models.DateField(null=True, blank=True)
    operating_system = models.CharField(max_length=150, null=True, blank=True)
    device_category = models.CharField(max_length=150, null=True, blank=True)
    device_brand = models.CharField(max_length=150, null=True, blank=True)
    browser = models.CharField(max_length=150, null=True, blank=True)
    page_id = models.CharField(max_length=50, null=True, blank=True)
    page_title = models.CharField(max_length=150, null=True, blank=True)
    page_location = models.CharField(max_length=150, null=True, blank=True)
    event_name = models.CharField(max_length=150, null=True, blank=True)
    activity_id = models.CharField(max_length=50, null=True, blank=True)
    # Ordinal position of this interaction within its session.
    interaction_number = models.IntegerField(null=True, blank=True)
    # Stored as the strings 'True'/'False'; the second tuple element (a
    # bool) is only the display label. NOTE(review): a BooleanField would
    # be the conventional modelling here.
    is_entrance = models.CharField(
        max_length=10, choices=(('True', True), ('False', False)), null=True, blank=True)
    is_exit = models.CharField(max_length=10, choices=(
        ('True', True), ('False', False)), null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
class InteractionLocation(models.Model):
    # Join table: links an interaction to a location.
    id = models.AutoField(primary_key=True)
    interaction_id = models.CharField(max_length=50, null=True, blank=True)
    location_id = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# WebPage
class WebPage(models.Model):
    # A tracked web page; url and page_path are required, the rest
    # optional.
    id = models.AutoField(primary_key=True)
    page_id = models.CharField(max_length=50, null=True, blank=True)
    url = models.CharField(max_length=200)
    page_path = models.CharField(max_length=200)
    page_title = models.CharField(max_length=150, null=True, blank=True)
    search_keyword = models.CharField(max_length=150, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# Contact
class Contact(models.Model):
    # A contact point (email/phones/url/business hours) referenced from
    # Customer and EntityContactPoint.
    id = models.AutoField(primary_key=True)
    contact_id = models.CharField(max_length=50, null=True, blank=True)
    contact_name = models.CharField(max_length=50, null=True, blank=True)
    email = models.CharField(max_length=50, null=True, blank=True)
    phone1 = models.CharField(max_length=50, null=True, blank=True)
    phone2 = models.CharField(max_length=50, null=True, blank=True)
    url = models.CharField(max_length=50, null=True, blank=True)
    business_hour = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
# EntityContactPoint
class EntityContactPoint(models.Model):
    # Join table: links an entity to a contact, with the contact's role.
    id = models.AutoField(primary_key=True)
    entity_id = models.CharField(max_length=50, null=True, blank=True)
    contact_id = models.CharField(max_length=50, null=True, blank=True)
    contact_role = models.CharField(max_length=50, null=True, blank=True)
    import_id = models.CharField(max_length=30, null=True, blank=True)
#WebActivityType
class WebActivityType(models.Model):
    # Catalogue of web activity kinds with a small numeric weight.
    # Relies on Django's implicit auto `id` primary key, unlike the
    # models above that declare AutoField explicitly.
    name = models.CharField(max_length=60, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    value = models.DecimalField(max_digits=3, decimal_places=2, null=True, blank=True)
#WebActivity
class WebActivity(models.Model):
    # A logged visitor action on a page, typed via WebActivityType.
    page_id = models.CharField(max_length=50, null=True, blank=True)
    # NOTE(review): named `session` while sibling models use `session_id`
    # — confirm and align naming when migrations allow.
    session = models.CharField(max_length=50, null=True, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    browser = models.CharField(max_length=80, null=True)
    visitor = models.CharField(max_length=20)
    # Only real ForeignKey in this group; deleting the type cascades to
    # its activities.
    activity_type = models.ForeignKey(
        WebActivityType, on_delete=models.CASCADE)
| 47.167939
| 97
| 0.747046
| 2,620
| 18,537
| 5.124427
| 0.069847
| 0.122151
| 0.177194
| 0.231715
| 0.880381
| 0.850737
| 0.840831
| 0.838522
| 0.798153
| 0.755549
| 0
| 0.02574
| 0.13233
| 18,537
| 392
| 98
| 47.288265
| 0.809003
| 0.023251
| 0
| 0.593857
| 0
| 0
| 0.003098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003413
| false
| 0
| 0.105802
| 0.003413
| 1.068259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
05b85fd65469ac3ca1d65062d1077e0aa9b01ccb
| 233
|
py
|
Python
|
iris_sdk/models/data/ord/area_code_search_order.py
|
NumberAI/python-bandwidth-iris
|
0e05f79d68b244812afb97e00fd65b3f46d00aa3
|
[
"MIT"
] | 2
|
2020-04-13T13:47:59.000Z
|
2022-02-23T20:32:41.000Z
|
iris_sdk/models/data/ord/area_code_search_order.py
|
bandwidthcom/python-bandwidth-iris
|
dbcb30569631395041b92917252d913166f7d3c9
|
[
"MIT"
] | 5
|
2020-09-18T20:59:24.000Z
|
2021-08-25T16:51:42.000Z
|
iris_sdk/models/data/ord/area_code_search_order.py
|
bandwidthcom/python-bandwidth-iris
|
dbcb30569631395041b92917252d913166f7d3c9
|
[
"MIT"
] | 5
|
2018-12-12T14:39:50.000Z
|
2020-11-17T21:42:29.000Z
|
#!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.ord.area_code_search_order import \
AreaCodeSearchOrderMap
class AreaCodeSearchOrder(AreaCodeSearchOrderMap, BaseData):
    """Area-code search order data object.

    Pure composition: the field layout comes from AreaCodeSearchOrderMap
    and the generic behavior from BaseData; no extra logic is needed.
    """
    pass
| 29.125
| 61
| 0.828326
| 29
| 233
| 6.448276
| 0.758621
| 0.085562
| 0.117647
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103004
| 233
| 8
| 62
| 29.125
| 0.894737
| 0.085837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
af46753cd78f2117e5076bc7a61fbae650fb6757
| 154
|
py
|
Python
|
src/distance.py
|
brandon-fremin/LightningNetworkAnalysis
|
c7b174e01327173ee71ef9caaa27f97ff89b969c
|
[
"MIT"
] | null | null | null |
src/distance.py
|
brandon-fremin/LightningNetworkAnalysis
|
c7b174e01327173ee71ef9caaa27f97ff89b969c
|
[
"MIT"
] | null | null | null |
src/distance.py
|
brandon-fremin/LightningNetworkAnalysis
|
c7b174e01327173ee71ef9caaa27f97ff89b969c
|
[
"MIT"
] | null | null | null |
def cost_d(channel, amount):
    """Return the fee-based cost of routing *amount* through *channel*.

    The channel's outbound policy supplies a flat base fee (scaled by
    1000) and a proportional rate applied to *amount*.
    """
    policy = channel["outpol"]
    flat_fee = 1000 * policy["base"]
    proportional_fee = policy["rate"] * amount
    return flat_fee + proportional_fee
def dist_d(channel, amount):
    """Hop-count distance metric: every channel costs exactly one unit.

    Both arguments are accepted (for signature parity with cost_d) but
    intentionally ignored.
    """
    del channel, amount  # unused by design
    return 1
| 22
| 80
| 0.655844
| 21
| 154
| 4.714286
| 0.571429
| 0.161616
| 0.282828
| 0.40404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039063
| 0.168831
| 154
| 6
| 81
| 25.666667
| 0.734375
| 0
| 0
| 0
| 0
| 0
| 0.12987
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
af584f6d67fbb9fe713102ee8191f1a82fd3b6b5
| 12,318
|
py
|
Python
|
user_service_sdk/api/apikey/apikey_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
user_service_sdk/api/apikey/apikey_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
user_service_sdk/api/apikey/apikey_client.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import user_service_sdk.api.apikey.create_apikey_pb2
import user_service_sdk.api.apikey.delete_apikey_pb2
import google.protobuf.empty_pb2
import user_service_sdk.api.apikey.disable_apikey_pb2
import user_service_sdk.api.apikey.enable_apikey_pb2
import user_service_sdk.api.apikey.get_apikey_pb2
import user_service_sdk.api.apikey.list_apikey_pb2
import user_service_sdk.api.apikey.reset_apikey_pb2
import user_service_sdk.utils.http_util
import google.protobuf.json_format
class ApikeyClient(object):
    """Client for the user_service ApiKey HTTP API.

    Every public method follows the same pipeline: resolve the routing
    name, serialize the protobuf request to a dict, perform the HTTP
    call via ``http_util.do_api_request`` and parse the JSON response
    back into a protobuf message. That pipeline lives once in
    ``_invoke`` instead of being repeated per method.
    """

    def __init__(self, server_ip="", server_port=0, service_name="", host=""):
        """Initialize the client.

        :param server_ip: explicit server ip of the target; when empty,
            requests are routed through the naming service
        :param server_port: explicit server port; must be given together
            with server_ip
        :param service_name: service name used for routing by contract;
            if both server_ip and service_name are set, server_ip wins
        :param host: Host header of the target service,
            e.g. cmdb.easyops-only.com
        """
        # server_ip and server_port must be supplied (or omitted) together.
        if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
            raise Exception("server_ip和server_port必须同时指定")
        self._server_ip = server_ip
        self._server_port = server_port
        self._service_name = service_name
        self._host = host

    def _invoke(self, method, route, uri, request, org, user, timeout, rsp, data_wrapped):
        """Shared request pipeline for all ApiKey endpoints.

        :param method: HTTP verb ("GET"/"POST"/"PUT"/"DELETE")
        :param route: contract route name, used only when routing by server_ip
        :param uri: request path
        :param request: protobuf request message to serialize as params
        :param org: customer org id (int), sent as a header
        :param user: calling username, sent as a header
        :param timeout: request timeout in seconds
        :param rsp: empty protobuf response instance to fill and return
        :param data_wrapped: True when the payload sits under rsp_obj["data"]
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # service_name takes precedence for naming-service routing; an
        # explicit server_ip falls back to the contract route name.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = route
        rsp_obj = user_service_sdk.utils.http_util.do_api_request(
            method=method,
            src_name="logic.user_service_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                request, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        payload = rsp_obj["data"] if data_wrapped else rsp_obj
        google.protobuf.json_format.ParseDict(payload, rsp, ignore_unknown_fields=True)
        return rsp

    def create_api_key(self, request, org, user, timeout=10):
        # type: (user_service_sdk.api.apikey.create_apikey_pb2.CreateApiKeyRequest, int, str, int) -> user_service_sdk.api.apikey.create_apikey_pb2.CreateApiKeyResponse
        """Create a user ApiKey [internal].

        :param request: create_api_key request
        :param org: customer org id, numeric
        :param user: username used for the api call
        :param timeout: call timeout in seconds
        :return: user_service_sdk.api.apikey.create_apikey_pb2.CreateApiKeyResponse
        """
        return self._invoke(
            "POST",
            "easyops.api.user_service.apikey.CreateApiKey",
            "/api/v1/apikey/{user}".format(user=request.user),
            request, org, user, timeout,
            user_service_sdk.api.apikey.create_apikey_pb2.CreateApiKeyResponse(),
            True,
        )

    def delete_api_key(self, request, org, user, timeout=10):
        # type: (user_service_sdk.api.apikey.delete_apikey_pb2.DeleteApiKeyRequest, int, str, int) -> google.protobuf.empty_pb2.Empty
        """Delete a user ApiKey [internal].

        :param request: delete_api_key request
        :param org: customer org id, numeric
        :param user: username used for the api call
        :param timeout: call timeout in seconds
        :return: google.protobuf.empty_pb2.Empty
        """
        return self._invoke(
            "DELETE",
            "easyops.api.user_service.apikey.DeleteApiKey",
            "/api/v1/apikey/delete/{access_key}".format(access_key=request.access_key),
            request, org, user, timeout,
            google.protobuf.empty_pb2.Empty(),
            False,
        )

    def disable_api_key(self, request, org, user, timeout=10):
        # type: (user_service_sdk.api.apikey.disable_apikey_pb2.DisableApiKeyRequest, int, str, int) -> google.protobuf.empty_pb2.Empty
        """Disable a user ApiKey [internal].

        :param request: disable_api_key request
        :param org: customer org id, numeric
        :param user: username used for the api call
        :param timeout: call timeout in seconds
        :return: google.protobuf.empty_pb2.Empty
        """
        return self._invoke(
            "PUT",
            "easyops.api.user_service.apikey.DisableApiKey",
            "/api/v1/apikey/disable/{access_key}".format(access_key=request.access_key),
            request, org, user, timeout,
            google.protobuf.empty_pb2.Empty(),
            False,
        )

    def enable_api_key(self, request, org, user, timeout=10):
        # type: (user_service_sdk.api.apikey.enable_apikey_pb2.EnableApiKeyRequest, int, str, int) -> google.protobuf.empty_pb2.Empty
        """Enable a user ApiKey [internal].

        :param request: enable_api_key request
        :param org: customer org id, numeric
        :param user: username used for the api call
        :param timeout: call timeout in seconds
        :return: google.protobuf.empty_pb2.Empty
        """
        return self._invoke(
            "PUT",
            "easyops.api.user_service.apikey.EnableApiKey",
            "/api/v1/apikey/enable/{access_key}".format(access_key=request.access_key),
            request, org, user, timeout,
            google.protobuf.empty_pb2.Empty(),
            False,
        )

    def get_api_key(self, request, org, user, timeout=10):
        # type: (google.protobuf.empty_pb2.Empty, int, str, int) -> user_service_sdk.api.apikey.get_apikey_pb2.GetApiKeyResponse
        """Query the caller's own apikey.

        :param request: get_api_key request (empty message)
        :param org: customer org id, numeric
        :param user: username used for the api call
        :param timeout: call timeout in seconds
        :return: user_service_sdk.api.apikey.get_apikey_pb2.GetApiKeyResponse
        """
        return self._invoke(
            "GET",
            "easyops.api.user_service.apikey.GetApiKey",
            "/profile/apikey",
            request, org, user, timeout,
            user_service_sdk.api.apikey.get_apikey_pb2.GetApiKeyResponse(),
            True,
        )

    def list_api_key(self, request, org, user, timeout=10):
        # type: (user_service_sdk.api.apikey.list_apikey_pb2.ListApiKeyRequest, int, str, int) -> user_service_sdk.api.apikey.list_apikey_pb2.ListApiKeyResponse
        """List user ApiKeys [internal].

        :param request: list_api_key request
        :param org: customer org id, numeric
        :param user: username used for the api call
        :param timeout: call timeout in seconds
        :return: user_service_sdk.api.apikey.list_apikey_pb2.ListApiKeyResponse
        """
        return self._invoke(
            "GET",
            "easyops.api.user_service.apikey.ListApiKey",
            "/api/v1/apikey",
            request, org, user, timeout,
            user_service_sdk.api.apikey.list_apikey_pb2.ListApiKeyResponse(),
            True,
        )

    def reset_api_key(self, request, org, user, timeout=10):
        # type: (user_service_sdk.api.apikey.reset_apikey_pb2.ResetApiKeyRequest, int, str, int) -> user_service_sdk.api.apikey.reset_apikey_pb2.ResetApiKeyResponse
        """Reset a user ApiKey [internal].

        :param request: reset_api_key request
        :param org: customer org id, numeric
        :param user: username used for the api call
        :param timeout: call timeout in seconds
        :return: user_service_sdk.api.apikey.reset_apikey_pb2.ResetApiKeyResponse
        """
        return self._invoke(
            "PUT",
            "easyops.api.user_service.apikey.ResetApiKey",
            "/api/v1/apikey/_reset/{user}".format(user=request.user),
            request, org, user, timeout,
            user_service_sdk.api.apikey.reset_apikey_pb2.ResetApiKeyResponse(),
            True,
        )
| 37.554878
| 168
| 0.619581
| 1,403
| 12,318
| 5.117605
| 0.093371
| 0.072006
| 0.077994
| 0.059192
| 0.85195
| 0.848189
| 0.83663
| 0.835376
| 0.762535
| 0.691086
| 0
| 0.006804
| 0.284137
| 12,318
| 327
| 169
| 37.669725
| 0.807439
| 0.205959
| 0
| 0.721698
| 0
| 0
| 0.081384
| 0.068557
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.056604
| 0
| 0.132075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
af63e1bfecde6eb9dc35c107368f9bf07096cd07
| 36,014
|
py
|
Python
|
backend_project/app.py
|
DouglasAmorim/scholl_communication_backend
|
8d764fe45ebaa9f96266f194c54a5e579366658c
|
[
"MIT"
] | 1
|
2022-02-02T17:52:28.000Z
|
2022-02-02T17:52:28.000Z
|
backend_project/app.py
|
DouglasAmorim/scholl_communication_backend
|
8d764fe45ebaa9f96266f194c54a5e579366658c
|
[
"MIT"
] | 7
|
2022-02-02T18:03:04.000Z
|
2022-03-09T23:17:18.000Z
|
backend_project/app.py
|
DouglasAmorim/school_communication_backend
|
8d764fe45ebaa9f96266f194c54a5e579366658c
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from flask import Flask, request, jsonify
from flask_restful import Resource, Api
from sqlalchemy import create_engine
import pika
from json import dumps
from flask_jwt_extended import jwt_required, create_access_token, create_refresh_token, JWTManager, get_jwt_identity
from werkzeug.security import generate_password_hash, check_password_hash
# SQLite engine shared by every resource/query below.
db_connect = create_engine('sqlite:///db_scholl_app.sqlite')
app = Flask(__name__)
#TODO: REFACTOR THIS SECRET KEY#
# NOTE(review): the JWT signing key is hard-coded and committed; load it
# from configuration/environment before deploying.
app.config.from_mapping(
    JWT_SECRET_KEY = 'JWT_SECRET_KEY'
)
JWTManager(app)
api = Api(app)
class OperatorDb():
    """Thin data-access layer over the SQLite database.

    The *ByUsername/*ByName helpers load the whole table and filter in
    Python, returning the first matching row dict or None; only
    getAlunoById filters in SQL. The seven copy-pasted lookups are
    consolidated into the _rows/_find helpers.
    """

    @staticmethod
    def _rows(table):
        # Fetch every row of *table* as a list of dicts. *table* is always
        # a hard-coded name below, never user input.
        conn = db_connect.connect()
        query = conn.execute("select * from %s" % table)
        return [dict(zip(tuple(query.keys()), i)) for i in query.cursor]

    @staticmethod
    def _find(table, column, value):
        # Return the first row of *table* whose *column* equals *value*,
        # or None when nothing matches (mirrors the original fall-through).
        for row in OperatorDb._rows(table):
            if value == row[column]:
                return row
        return None

    def getAlunoById(self, id):
        """Return the Alunos row(s) with the given AlunosId (as a list)."""
        # int() both validates the id and keeps the %d interpolation safe.
        conn = db_connect.connect()
        query = conn.execute("select * from Alunos where AlunosId =%d" % int(id))
        return [dict(zip(tuple(query.keys()), i)) for i in query.cursor]

    @staticmethod
    def getPaisByUsername(username):
        """First Pais row with the given Username, or None."""
        return OperatorDb._find("Pais", 'Username', username)

    @staticmethod
    def getPaisByName(name):
        """First Pais row with the given Nome, or None."""
        return OperatorDb._find("Pais", 'Nome', name)

    @staticmethod
    def getProfessorByUsername(username):
        """First Professores row with the given Username, or None."""
        return OperatorDb._find("Professores", 'Username', username)

    @staticmethod
    def getProfessorByName(name):
        """First Professores row with the given Nome, or None."""
        return OperatorDb._find("Professores", 'Nome', name)

    @staticmethod
    def getAlunoByUsername(username):
        """First Alunos row with the given Username, or None."""
        return OperatorDb._find("Alunos", 'Username', username)

    @staticmethod
    def getEscolaByUsername(username):
        """First School row with the given Username, or None."""
        return OperatorDb._find("School", 'Username', username)

    @staticmethod
    def getAlunoByName(name):
        """First Alunos row with the given Nome, or None."""
        return OperatorDb._find("Alunos", 'Nome', name)
class QueueAlunos(Resource):
    # GET: return the QueueAluno row with the given QueueID as JSON.
    @jwt_required()
    def get(self, id):
        conn = db_connect.connect()
        # int() cast keeps the %d interpolation safe against injection.
        query = conn.execute("select * from QueueAluno where QueueID = %d" % int(id))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        return jsonify(result)
class QueueProfessor(Resource):
    # GET: return the QueueProfessor row with the given QueueID as JSON.
    @jwt_required()
    def get(self, id):
        conn = db_connect.connect()
        # int() cast keeps the %d interpolation safe against injection.
        query = conn.execute("select * from QueueProfessor where QueueID = %d" % int(id))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        return jsonify(result)
class QueuePais(Resource):
    # GET: return the QueuePais row with the given QueueID as JSON.
    @jwt_required()
    def get(self, id):
        conn = db_connect.connect()
        # int() cast keeps the %d interpolation safe against injection.
        query = conn.execute("select * from QueuePais where QueueID = %d" % int(id))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        return jsonify(result)
class StudentLogin(Resource):
    def post(self, user, password):
        """Authenticate a student and issue JWT access/refresh tokens.

        Falls through (empty 200 response) when the username is unknown
        or the password does not match. NOTE(review): the whole row —
        including the hashed 'Senha' — is echoed back to the client;
        consider stripping it. An explicit 401 on failure would also be
        clearer for API consumers.
        """
        aluno = OperatorDb.getAlunoByUsername(user)
        if aluno is not None:  # `is not None`, not `!= None` (identity check)
            isPassCorrect = check_password_hash(aluno['Senha'], password)
            if isPassCorrect:
                refresh = create_refresh_token(identity=aluno['AlunosId'])
                access = create_access_token(identity=aluno['AlunosId'])
                return jsonify({
                    'aluno': aluno,
                    'access': access,
                    'refresh': refresh
                })
class SchoolLogin(Resource):
    def post(self, user, password):
        """Authenticate a school account and issue JWT tokens.

        Falls through (empty 200 response) on unknown username or wrong
        password. NOTE(review): the row — including the hashed 'Senha' —
        is echoed back to the client.
        """
        escola = OperatorDb.getEscolaByUsername(user)
        if escola is not None:  # `is not None`, not `!= None` (identity check)
            isPassCorrect = check_password_hash(escola['Senha'], password)
            if isPassCorrect:
                refresh = create_refresh_token(identity=escola['SchoolId'])
                access = create_access_token(identity=escola['SchoolId'])
                return jsonify({
                    'escola': escola,
                    'access': access,
                    'refresh': refresh
                })
class TeacherLogin(Resource):
    def post(self, user, password):
        """Authenticate a teacher and issue JWT tokens.

        Falls through (empty 200 response) on unknown username or wrong
        password. NOTE(review): the row — including the hashed 'Senha' —
        is echoed back to the client.
        """
        professor = OperatorDb.getProfessorByUsername(user)
        if professor is not None:  # `is not None`, not `!= None` (identity check)
            isPassCorrect = check_password_hash(professor['Senha'], password)
            if isPassCorrect:
                refresh = create_refresh_token(identity=professor['ProfessoresId'])
                access = create_access_token(identity=professor['ProfessoresId'])
                return jsonify({
                    'professor': professor,
                    'access': access,
                    'refresh': refresh
                })
class ParentsLogin(Resource):
    def post(self, user, password):
        """Authenticate a parent and issue JWT tokens.

        Falls through (empty 200 response) on unknown username or wrong
        password. NOTE(review): the row — including the hashed 'Senha' —
        is echoed back to the client.
        """
        pais = OperatorDb.getPaisByUsername(user)
        if pais is not None:  # `is not None`, not `!= None` (identity check)
            isPassCorrect = check_password_hash(pais['Senha'], password)
            if isPassCorrect:
                refresh = create_refresh_token(identity=pais['PaisId'])
                access = create_access_token(identity=pais['PaisId'])
                return jsonify({
                    'pais': pais,
                    'access': access,
                    'refresh': refresh
                })
class Logout(Resource):
    @jwt_required()
    def post(self):
        """Mark the caller logged out and return a status payload.

        NOTE(review): the JWT itself is not revoked server-side; a token
        blocklist would be required for a real logout. The original
        implementation zipped each dict key with the characters of the
        key string, yielding {'IsAuthenticated': 'I'}; return the intended
        payload directly instead (and avoid shadowing builtin `object`).
        """
        self.isAuthenticated = False
        return [{'IsAuthenticated': 'False'}]
class Alunos(Resource):
    # GET: return every Alunos row as JSON. NOTE(review): rows include
    # the hashed 'Senha' column — consider stripping it.
    @jwt_required()
    def get(self):
        conn = db_connect.connect()
        query = conn.execute("select * from Alunos")
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        return jsonify(result)
class AlunoById(Resource):
    @jwt_required()
    def get(self, id):
        """Return the Alunos row with the given AlunosId as JSON.

        Delegates to the shared DB helper; getAlunoById takes a `self`
        parameter, so it is invoked unbound with this resource instance.
        (Dead commented-out inline query removed.)
        """
        return jsonify(OperatorDb.getAlunoById(self, id))
class ProfessoresContactsPais(Resource):
    # GET: list the parents (Pais) whose children are in the turma taught
    # by professor *id*, with password hashes blanked and duplicates
    # removed.
    @jwt_required()
    def get(self, id):
        conn = db_connect.connect()
        # Resolve the professor's turma.
        query = conn.execute("select * from Turma_has_Professores where ProfessoresId = %d" % int(id))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        turmaId = result[0]['TurmaId']
        # All aluno/pais links for that turma.
        query = conn.execute("select * from Alunos_has_Pais where TurmaId = %d" % int(turmaId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        finalResult = []
        # One extra query per link row (N+1 pattern); `result` is reused
        # as the per-iteration scratch variable.
        for aluno_has_pais in result:
            paisId = aluno_has_pais['PaisID']
            query = conn.execute("select * from Pais where PaisId = %d" % int(paisId))
            result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
            # Blank the password hash before exposing the row.
            result[0]['Senha'] = ''
            if result[0] not in finalResult:
                finalResult.append(result[0])
        return jsonify(finalResult)
class ContactsEscola(Resource):
    # GET: return every School row as JSON. NOTE(review): unlike the
    # sibling contact endpoints, 'Senha' is NOT blanked here — the school
    # password hash is exposed to clients; confirm and fix.
    @jwt_required()
    def get(self):
        conn = db_connect.connect()
        query = conn.execute("select * from School")
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        return jsonify(result)
class ProfessoresContactsById(Resource):
    @jwt_required()
    def get(self, id):
        """List the alunos of the turma taught by professor *id*.

        Password hashes are blanked on EVERY returned row — the original
        only cleared result[0], leaking the hash of all other students.
        """
        conn = db_connect.connect()
        # Resolve the professor's turma (int() keeps %d interpolation safe).
        query = conn.execute("select * from Turma_has_Professores where ProfessoresId = %d" % int(id))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        turmaId = result[0]['TurmaId']
        query = conn.execute("select * from Alunos where TurmaId = %d" % int(turmaId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        # Strip the password hash from all rows before serializing.
        for row in result:
            row['Senha'] = ''
        return jsonify(result)
class PaisContactsById(Resource):
    # GET: list the professors teaching the turma of parent *id*'s child,
    # with password hashes blanked. Walks Pais -> Alunos_has_Pais ->
    # Turma_has_Professores -> Professores.
    @jwt_required()
    def get(self, id):
        conn = db_connect.connect()
        query = conn.execute("select * from Pais where PaisId = %d" % int(id))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        paisId = result[0]['PaisId']
        query = conn.execute("select * from Alunos_has_Pais where PaisId = %d" % int(paisId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        # Only the first link row's turma is considered.
        turmaId = result[0]['TurmaId']
        query = conn.execute("select * from Turma_has_Professores where TurmaId = %d" % int(turmaId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        finalResult = []
        # One extra query per professor (N+1 pattern); `result` doubles as
        # the per-iteration scratch variable.
        for value in result:
            professorId = value['ProfessoresId']
            query = conn.execute("select * from Professores where ProfessoresId = %d" % int(professorId))
            result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
            # Blank the password hash before exposing the row.
            result[0]['Senha'] = ''
            finalResult.append(result[0])
        return jsonify(finalResult)
class AlunosContactById(Resource):
    # GET: return the professor of aluno *id*'s turma, password blanked.
    # Walks Alunos -> Turma_has_Professores -> Professores; only the
    # first professor of the turma is returned.
    @jwt_required()
    def get(self, id):
        conn = db_connect.connect()
        query = conn.execute("select * from Alunos where AlunosId = %d" % int(id))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        turmaId = result[0]['TurmaId']
        query = conn.execute("select * from Turma_has_Professores where TurmaId = %d" % int(turmaId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        professorId = result[0]['ProfessoresId']
        query = conn.execute("select * from Professores where ProfessoresId = %d" % int(professorId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        # Blank the password hash before exposing the row.
        result[0]['Senha'] = ''
        return jsonify(result)
class EscolaSendMessageToAluno(Resource):
    """School -> student message: publish to the student's RabbitMQ queue and
    persist it in the CaixaEntradaAlunos inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueueAluno where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaAlunos VALUES(?, ?, 0, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        return "success"
class EscolaSendMessageToProfessores(Resource):
    """School -> teacher message: publish to the teacher's RabbitMQ queue and
    persist it in the CaixaEntradaProfessores inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueueProfessores where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaProfessores VALUES(?, ?, 0, 0, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        return "success"
class EscolaSendMessageToPais(Resource):
    """School -> parent message: publish to the parent's RabbitMQ queue and
    persist it in the CaixaEntradaPais inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueuePais where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaPais VALUES(?, 0, ?, 0, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        return "success"
# NOTE(review): duplicate class — EscolaSendMessageToAluno is already defined
# earlier in this file; this later definition rebinds the name and is the one
# api.add_resource registers. Consider deleting one copy.
class EscolaSendMessageToAluno(Resource):
    """School -> student message: publish to the student's RabbitMQ queue and
    persist it in the CaixaEntradaAlunos inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueueAluno where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaAlunos VALUES(?, ?, 0, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        return "success"
class ProfessorSendMessageToAluno(Resource):
    """Teacher -> student message: publish to the student's RabbitMQ queue and
    persist it in the CaixaEntradaAlunos inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueueAluno where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. VALUES(AlunoId, SchoolId, ProfessorId, Mensagem, Data).
        conn.execute(
            "INSERT INTO CaixaEntradaAlunos VALUES(?, 0, ?, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        # Removed leftover debug select/print of the whole inbox table.
        return "success"
class ProfessorSendMessageToEscola(Resource):
    """Teacher -> school message: publish to the school's RabbitMQ queue and
    persist it in the CaixaEntradaEscola inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueueSchool where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaEscola VALUES(?, 0, ?, 0, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        # Removed leftover debug select/print, which also read the WRONG table
        # (CaixaEntradaAlunos) after inserting into CaixaEntradaEscola.
        return "success"
class ProfessorSendMessageToPais(Resource):
    """Teacher -> parent message: publish to the parent's RabbitMQ queue and
    persist it in the CaixaEntradaPais inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueuePais where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaPais VALUES(?, ?, 0, 0, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        # Removed leftover debug select/print of the whole inbox table.
        return "success"
class AlunosSendMessageToProfessores(Resource):
    """Student -> teacher message: publish to the teacher's RabbitMQ queue and
    persist it in the CaixaEntradaProfessores inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        # NOTE(review): table name is QueueProfessor here but QueueProfessores
        # in EscolaSendMessageToProfessores — confirm which one exists.
        conn = db_connect.connect()
        query = conn.execute("select * from QueueProfessor where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. VALUES(ProfessorId, SchoolId, PaisId, AlunosId, Mensagem, Data).
        conn.execute(
            "INSERT INTO CaixaEntradaProfessores VALUES(?, 0, 0, ?, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        return "success"
class AlunosSendMessageToEscola(Resource):
    """Student -> school message: publish to the school's RabbitMQ queue and
    persist it in the CaixaEntradaEscola inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueueSchool where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaEscola VALUES(?, 0, 0, ?, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        return "success"
class PaisSendMessageToProfessores(Resource):
    """Parent -> teacher message: publish to the teacher's RabbitMQ queue and
    persist it in the CaixaEntradaProfessores inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueueProfessor where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaProfessores VALUES(?, 0, ?, 0, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        return "success"
class PaisSendMessageToEscola(Resource):
    """Parent -> school message: publish to the school's RabbitMQ queue and
    persist it in the CaixaEntradaEscola inbox table."""
    @jwt_required()
    def post(self, remetenteNome, destinatarioId, remetenteId, destinatarioQueueId, message):
        # Look up the recipient's queue name (id is int()-cast, so %d is safe).
        conn = db_connect.connect()
        query = conn.execute("select * from QueueSchool where QueueId = %d" % int(destinatarioQueueId))
        result = [dict(zip(tuple(query.keys()), i)) for i in query.cursor]
        queueName = result[0]['Nome']
        newMessage = '{Remetente:' + remetenteNome + ', Mensagem:' + message + '}'
        # RabbitMQ publish.
        # NOTE(review): hard-coded broker credentials — move to configuration.
        credentials = pika.PlainCredentials('admin', 'D!o@4701298')
        connection = pika.BlockingConnection(
            pika.ConnectionParameters('localhost', 5672, '/', credentials))
        channel = connection.channel()
        channel.queue_declare(queue=queueName, passive=False, durable=True,
                              exclusive=False, auto_delete=False, arguments=None)
        channel.basic_publish(exchange='', routing_key=queueName, body=newMessage)
        connection.close()
        # Parameterized INSERT: the previous %-formatting let `message`
        # inject arbitrary SQL. Column order unchanged.
        conn.execute(
            "INSERT INTO CaixaEntradaEscola VALUES(?, ?, 0, 0, ?, datetime('now'))",
            (int(destinatarioId), int(remetenteId), message))
        return "success"
class AlunosGetMessages(Resource):
    """Messages received by a student (CaixaEntradaAlunos rows)."""
    @jwt_required()
    def get(self, alunoId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaAlunos where AlunoId = %d" % int(alunoId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class EscolaGetMessages(Resource):
    """Messages received by the school (CaixaEntradaEscola rows)."""
    @jwt_required()
    def get(self, schoolId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaEscola where SchoolId = %d" % int(schoolId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class ProfessoresGetMessages(Resource):
    """Messages received by a teacher (CaixaEntradaProfessores rows)."""
    @jwt_required()
    def get(self, professoresId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaProfessores where ProfessoresId = %d" % int(professoresId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class PaisGetMessages(Resource):
    """Messages received by a parent (CaixaEntradaPais rows)."""
    @jwt_required()
    def get(self, paisId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaPais where PaisId = %d" % int(paisId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class AlunosGetSendMessages(Resource):
    """Messages a student sent to teachers (rows in CaixaEntradaProfessores)."""
    @jwt_required()
    def get(self, alunoId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaProfessores where AlunoId = %d" % int(alunoId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class AlunosGetSendMessagesEscola(Resource):
    """Messages a student sent to the school (rows in CaixaEntradaEscola)."""
    @jwt_required()
    def get(self, alunoId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaEscola where AlunoId = %d" % int(alunoId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class ProfessoresGetSendMessagesAlunos(Resource):
    """Messages a teacher sent to students (rows in CaixaEntradaAlunos)."""
    @jwt_required()
    def get(self, professoresId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaAlunos where ProfessoresId = %d" % int(professoresId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class ProfessoresGetSendMessagesPais(Resource):
    """Messages a teacher sent to parents (rows in CaixaEntradaPais)."""
    @jwt_required()
    def get(self, professoresId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaPais where ProfessoresId = %d" % int(professoresId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class ProfessoresGetSendMessagesEscola(Resource):
    """Messages a teacher sent to the school (rows in CaixaEntradaEscola)."""
    @jwt_required()
    def get(self, professoresId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaEscola where ProfessoresId = %d" % int(professoresId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class PaisGetSendMessagesEscola(Resource):
    """Messages a parent sent to the school (rows in CaixaEntradaEscola)."""
    @jwt_required()
    def get(self, paisId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaEscola where PaisId = %d" % int(paisId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class PaisGetSendMessagesProfessores(Resource):
    """Messages a parent sent to teachers (rows in CaixaEntradaProfessores)."""
    @jwt_required()
    def get(self, paisId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaProfessores where PaisId = %d" % int(paisId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class EscolaGetSendMessagesProfessores(Resource):
    """Messages the school sent to teachers (rows in CaixaEntradaProfessores)."""
    @jwt_required()
    def get(self, schoolId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaProfessores where SchoolId = %d" % int(schoolId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class EscolaGetSendMessagesAlunos(Resource):
    """Messages the school sent to students (rows in CaixaEntradaAlunos)."""
    @jwt_required()
    def get(self, schoolId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaAlunos where SchoolId = %d" % int(schoolId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class EscolaGetSendMessagesPais(Resource):
    """Messages the school sent to parents (rows in CaixaEntradaPais)."""
    @jwt_required()
    def get(self, schoolId):
        conn = db_connect.connect()
        rows = conn.execute("select * from CaixaEntradaPais where SchoolId = %d" % int(schoolId))
        columns = tuple(rows.keys())
        return [dict(zip(columns, row)) for row in rows.cursor]
class RefreshToken(Resource):
    """Exchange a valid refresh token for a fresh access token."""
    @jwt_required(refresh=True)
    def post(self):
        new_access = create_access_token(identity=get_jwt_identity())
        return jsonify({'access': new_access})
# Route registration. URL variable names must match handler parameter names
# exactly — Flask passes them as keyword arguments.
# Refresh Token
api.add_resource(RefreshToken, '/token/refresh')
# LOGIN/LOGOUT
api.add_resource(StudentLogin, '/login/students/<user>/<password>')
api.add_resource(ParentsLogin, '/login/parents/<user>/<password>')
api.add_resource(TeacherLogin, '/login/teacher/<user>/<password>')
api.add_resource(SchoolLogin, '/login/school/<user>/<password>')
api.add_resource(QueueAlunos, '/alunos/queue/<id>')
api.add_resource(QueuePais, '/pais/queue/<id>')
api.add_resource(QueueProfessor, '/professores/queue/<id>')
api.add_resource(Alunos, '/alunos')
api.add_resource(AlunoById, '/alunos/<id>')
## GET Contacts
api.add_resource(AlunosContactById, '/alunos/<id>/contacts')
api.add_resource(PaisContactsById, '/pais/<id>/contacts')
api.add_resource(ProfessoresContactsById, '/professores/<id>/contacts/alunos')
api.add_resource(ProfessoresContactsPais, '/professores/<id>/contacts/pais')
api.add_resource(ContactsEscola, '/contacts/escola')
## Endpoints Send Messages
api.add_resource(EscolaSendMessageToAluno, '/escola/send/alunos/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(EscolaSendMessageToProfessores, '/escola/send/professores/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(EscolaSendMessageToPais, '/escola/send/pais/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(ProfessorSendMessageToAluno, '/professor/send/alunos/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(ProfessorSendMessageToPais, '/professor/send/pais/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(ProfessorSendMessageToEscola, '/professor/send/escola/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(AlunosSendMessageToProfessores, '/alunos/send/professores/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(AlunosSendMessageToEscola, '/alunos/send/escola/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(PaisSendMessageToProfessores, '/pais/send/professores/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
api.add_resource(PaisSendMessageToEscola, '/pais/send/escola/<remetenteNome>/<destinatarioId>/<remetenteId>/<destinatarioQueueId>/<message>')
## Endpoints Get Messages
api.add_resource(AlunosGetMessages, '/alunos/messages/received/<alunoId>')
api.add_resource(ProfessoresGetMessages, '/professor/messages/received/<professoresId>')
api.add_resource(PaisGetMessages, '/pais/messages/received/<paisId>')
api.add_resource(EscolaGetMessages, '/escola/messages/received/<schoolId>')
# BUGFIX: the two routes below used '<alunosId>' while both handlers declare
# the parameter 'alunoId', which made Flask raise a TypeError on every request.
api.add_resource(AlunosGetSendMessages, '/alunos/messages/send/professores/<alunoId>')
api.add_resource(AlunosGetSendMessagesEscola, '/alunos/messages/send/escola/<alunoId>')
api.add_resource(ProfessoresGetSendMessagesAlunos, '/professor/messages/send/alunos/<professoresId>')
api.add_resource(ProfessoresGetSendMessagesPais, '/professor/messages/send/pais/<professoresId>')
api.add_resource(ProfessoresGetSendMessagesEscola, '/professor/messages/send/escola/<professoresId>')
api.add_resource(PaisGetSendMessagesEscola, '/pais/messages/send/escola/<paisId>')
api.add_resource(PaisGetSendMessagesProfessores, '/pais/messages/send/professores/<paisId>')
api.add_resource(EscolaGetSendMessagesAlunos, '/escola/messages/send/alunos/<schoolId>')
api.add_resource(EscolaGetSendMessagesPais, '/escola/messages/send/pais/<schoolId>')
api.add_resource(EscolaGetSendMessagesProfessores, '/escola/messages/send/professores/<schoolId>')
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Start the Flask development server with default settings
    # (not suitable for production deployment).
    app.run()
| 38.353568
| 165
| 0.597712
| 3,515
| 36,014
| 6.058037
| 0.061166
| 0.033578
| 0.01747
| 0.023246
| 0.804452
| 0.77435
| 0.765239
| 0.756786
| 0.749742
| 0.736311
| 0
| 0.006329
| 0.28053
| 36,014
| 938
| 166
| 38.394456
| 0.815484
| 0.031127
| 0
| 0.7136
| 0
| 0.0128
| 0.175837
| 0.059445
| 0
| 0
| 0
| 0.001066
| 0
| 1
| 0.0784
| false
| 0.0384
| 0.0128
| 0.0016
| 0.2368
| 0.0048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bb6f02de351b3c32bbd4beea9c9dcb7bef05cdf2
| 463
|
py
|
Python
|
sanya_script_runtime/__init__.py
|
ARtoriouSs/sanya-script-runtime
|
adef8205b2e85704d98a04f0eb30b30e0ec2e75a
|
[
"WTFPL"
] | null | null | null |
sanya_script_runtime/__init__.py
|
ARtoriouSs/sanya-script-runtime
|
adef8205b2e85704d98a04f0eb30b30e0ec2e75a
|
[
"WTFPL"
] | null | null | null |
sanya_script_runtime/__init__.py
|
ARtoriouSs/sanya-script-runtime
|
adef8205b2e85704d98a04f0eb30b30e0ec2e75a
|
[
"WTFPL"
] | null | null | null |
from sanya_script_runtime.builtins import scan, put, puts, source, target, weight, value, arcs, nodes
from sanya_script_runtime.runtime_error import RuntimeError
from sanya_script_runtime.node import Node
from sanya_script_runtime.arc import Arc
from sanya_script_runtime.graph import Graph
from sanya_script_runtime.num import Num
from sanya_script_runtime.logic import Logic
from sanya_script_runtime.nope import Nope
from sanya_script_runtime.type import Type
| 46.3
| 101
| 0.868251
| 72
| 463
| 5.319444
| 0.333333
| 0.211488
| 0.35248
| 0.516971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095032
| 463
| 9
| 102
| 51.444444
| 0.914081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a521648284af4d0cbbe2c630e243244925f21aac
| 46,543
|
py
|
Python
|
test/mock_api.py
|
Gates-Zeng/PyGNS3
|
2e84dce91c6c6705c7fd5875846daef1fe301243
|
[
"MIT"
] | 9
|
2017-08-11T09:31:42.000Z
|
2020-03-31T12:59:16.000Z
|
test/mock_api.py
|
Gates-Zeng/PyGNS3
|
2e84dce91c6c6705c7fd5875846daef1fe301243
|
[
"MIT"
] | 3
|
2019-02-22T13:28:34.000Z
|
2019-09-09T16:15:20.000Z
|
test/mock_api.py
|
Gates-Zeng/PyGNS3
|
2e84dce91c6c6705c7fd5875846daef1fe301243
|
[
"MIT"
] | 7
|
2017-10-05T18:25:13.000Z
|
2021-06-28T10:23:18.000Z
|
"""
Contains mock GNS3 controller API responses (keyed by request path) for testing purposes.
"""
mock_get = {
'/computes/local': '{"capabilities": {"node_types": ["cloud", "ethernet_hub", "ethernet_switch", "vpcs", "virtualbox", "dynamips", "frame_relay_switch", "atm_switch", "qemu", "vmware"], "platform": "darwin", "version": "2.0.3"}, "compute_id": "local", "connected": true, "cpu_usage_percent": 14.3, "host": "127.0.0.1", "memory_usage_percent": 68.4, "name": "DJ-Johns-MBP.fritz.box", "port": 3080, "protocol": "http", "user": "admin"}',
'/version': '{"local": true, "version": "2.0.3"}',
'/computes': '[{"capabilities": {"node_types": ["cloud", "ethernet_hub", "ethernet_switch", "vpcs", "virtualbox", "dynamips", "frame_relay_switch", "atm_switch", "qemu", "vmware"], "platform": "darwin", "version": "2.0.3"}, "compute_id": "local", "connected": true, "cpu_usage_percent": 14.3, "host": "127.0.0.1", "memory_usage_percent": 68.4, "name": "DJ-Johns-MBP.fritz.box", "port": 3080, "protocol": "http", "user": "admin"}, {"capabilities": {"node_types": [], "version": null}, "compute_id": "11df1f68-23ab-42f5-9a93-af65b7daad2a", "connected": false, "cpu_usage_percent": null, "host": "192.168.25.128", "memory_usage_percent": null, "name": "GNS3 VM", "port": 3080, "protocol": "http", "user": null}]',
'/computes/11df1f68-23ab-42f5-9a93-af65b7daad2a': '{"capabilities": {"node_types": [], "version": null}, "compute_id": "11df1f68-23ab-42f5-9a93-af65b7daad2a", "connected": false, "cpu_usage_percent": null, "host": "192.168.25.128", "memory_usage_percent": null, "name": "GNS3 VM", "port": 3080, "protocol": "http", "user": null}',
'/projects': '[{"auto_close": true, "auto_open": false, "auto_start": false, "filename": "Basic 4 Routers.gns3", "name": "Basic 4 Routers", "path": "/Users/maarten/GNS3/Projects/Basic 4 Routers", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "scene_height": 1000, "scene_width": 2000, "status": "opened"}, {"auto_close": true, "auto_open": false, "auto_start": false, "filename": "Basic Cloud Connection.gns3", "name": "Basic Cloud Connection", "path": "/Users/maarten/GNS3/projects/Basic Cloud Connection", "project_id": "5daa48ff-dbd6-407c-a3c6-645e743f233a", "scene_height": 1000, "scene_width": 2000, "status": "closed"}]',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7': '{"auto_close": true, "auto_open": false, "auto_start": false, "filename": "Basic 4 Routers.gns3", "name": "Basic 4 Routers", "path": "/Users/maarten/GNS3/Projects/Basic 4 Routers", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "scene_height": 1000, "scene_width": 2000, "status": "opened"}',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/drawings': '[{"drawing_id": "dc218a7f-221d-4340-9902-4d2c1726e081", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "rotation": 0, "svg": "<svg height=\\"20\\" width=\\"67\\"><text fill=\\"#000000\\" fill-opacity=\\"1.0\\" font-family=\\"TypeWriter\\" font-size=\\"10.0\\" font-weight=\\"bold\\">10.0.0.8/30</text></svg>", "x": -298, "y": -16, "z": 1}, {"drawing_id": "43cbd5ca-5da7-43fb-92bf-525cb7b4ee98", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "rotation": 0, "svg": "<svg height=\\"68\\" width=\\"193\\"><text fill=\\"#000000\\" fill-opacity=\\"1.0\\" font-family=\\"TypeWriter\\" font-size=\\"10.0\\" font-weight=\\"bold\\">NAME PC2 \\nIP/MASK 192.168.20.1/24 \\nGATEWAY 192.168.20.254 \\nMAC 00:50:79:66:68:00 \\nDNS</text></svg>", "x": 428, "y": 116, "z": 1}, {"drawing_id": "1dbed980-73d4-4dc1-afc6-149d559fb5ce", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "rotation": 0, "svg": "<svg height=\\"20\\" width=\\"67\\"><text fill=\\"#000000\\" fill-opacity=\\"1.0\\" font-family=\\"TypeWriter\\" font-size=\\"10.0\\" font-weight=\\"bold\\">10.0.0.0/30</text></svg>", "x": -113, "y": -171, "z": 1}, {"drawing_id": "a2f423d3-c30c-40cd-85dc-824d5ffa0cc3", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "rotation": 0, "svg": "<svg height=\\"20\\" width=\\"73\\"><text fill=\\"#000000\\" fill-opacity=\\"1.0\\" font-family=\\"TypeWriter\\" font-size=\\"10.0\\" font-weight=\\"bold\\">10.0.0.12/30</text></svg>", "x": -109, "y": 129, "z": 1}, {"drawing_id": "a9c0d8b8-f66f-4ddb-be1a-6a0730c83aa3", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "rotation": 0, "svg": "<svg height=\\"20\\" width=\\"67\\"><text fill=\\"#000000\\" fill-opacity=\\"1.0\\" font-family=\\"TypeWriter\\" font-size=\\"10.0\\" font-weight=\\"bold\\">10.0.0.4/30</text></svg>", "x": 80, "y": -16, "z": 1}, {"drawing_id": "5a751cb0-cb68-451c-a88d-58bb6c25f605", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "rotation": 0, 
"svg": "<svg height=\\"68\\" width=\\"193\\"><text fill=\\"#000000\\" fill-opacity=\\"1.0\\" font-family=\\"TypeWriter\\" font-size=\\"10.0\\" font-weight=\\"bold\\">NAME PC1 \\nIP/MASK 192.168.10.1/24 \\nGATEWAY 192.168.10.254 \\nMAC 00:50:79:66:68:01 \\nDNS</text></svg>", "x": -696, "y": -184, "z": 1}]',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/links': '[{"capture_file_name": null, "capture_file_path": null, "capturing": false, "link_id": "ec8cc0f1-455a-4c85-adf6-dc1083721cc5", "link_type": "ethernet", "nodes": [{"adapter_number": 0, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f0/1", "x": 33, "y": 62}, "node_id": "df2f8f9c-23cf-4001-a1d1-834f0ff66436", "port_number": 1}, {"adapter_number": 0, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f0/0", "x": 45, "y": -26}, "node_id": "7e6c9433-dbab-4b34-a731-2b43a7f77fef", "port_number": 0}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7"}, {"capture_file_name": null, "capture_file_path": null, "capturing": false, "link_id": "1ba79bea-9d55-40d2-9bd0-9e86975cf87a", "link_type": "ethernet", "nodes": [{"adapter_number": 0, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f0/0", "x": 74, "y": -7}, "node_id": "df2f8f9c-23cf-4001-a1d1-834f0ff66436", "port_number": 0}, {"adapter_number": 0, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f0/0", "x": -41, "y": -7}, "node_id": "61c67710-3c63-4f0d-bc4c-9680593e1a19", "port_number": 0}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7"}, {"capture_file_name": null, "capture_file_path": null, "capturing": false, "link_id": "7ef771d2-ac8f-46eb-a61b-f1d851fdd952", "link_type": "ethernet", "nodes": [{"adapter_number": 1, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f1/0", "x": 79, "y": 32}, "node_id": "7e6c9433-dbab-4b34-a731-2b43a7f77fef", "port_number": 0}, {"adapter_number": 0, "label": {"rotation": 0, "style": "font-family: 
TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f0/1", "x": -36, "y": 32}, "node_id": "a73e4d0e-2572-4945-8777-2b64919eba95", "port_number": 1}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7"}, {"capture_file_name": null, "capture_file_path": null, "capturing": false, "link_id": "56814a58-c7cd-4384-9a2d-69b80e55d49c", "link_type": "ethernet", "nodes": [{"adapter_number": 2, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f2/0", "x": 74, "y": 30}, "node_id": "a73e4d0e-2572-4945-8777-2b64919eba95", "port_number": 0}, {"adapter_number": 0, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "e0", "x": -17, "y": 33}, "node_id": "6f58d4cf-2aea-40e4-9d1b-e5bf20f3d51a", "port_number": 0}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7"}, {"capture_file_name": null, "capture_file_path": null, "capturing": false, "link_id": "582f802b-641f-44a7-bd9e-de1ecb60196c", "link_type": "ethernet", "nodes": [{"adapter_number": 0, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "e0", "x": 72, "y": 29}, "node_id": "be1673f7-b534-4263-bf83-ac05eb618360", "port_number": 0}, {"adapter_number": 2, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f2/0", "x": -40, "y": -8}, "node_id": "df2f8f9c-23cf-4001-a1d1-834f0ff66436", "port_number": 0}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7"}, {"capture_file_name": null, "capture_file_path": null, "capturing": false, "link_id": "008c61c2-d247-4e61-b8a8-1b816c4278ca", "link_type": "ethernet", "nodes": [{"adapter_number": 1, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 
1.0;", "text": "f1/0", "x": 48, "y": 52}, "node_id": "61c67710-3c63-4f0d-bc4c-9680593e1a19", "port_number": 0}, {"adapter_number": 1, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "f1/0", "x": -11, "y": -28}, "node_id": "a73e4d0e-2572-4945-8777-2b64919eba95", "port_number": 0}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7"}]',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/nodes/df2f8f9c-23cf-4001-a1d1-834f0ff66436': '{"command_line": null, "compute_id": "local", "console": 5007, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 45, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "C3725-1", "x": 5, "y": 20}, "name": "C3725-1", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/df2f8f9c-23cf-4001-a1d1-834f0ff66436", "node_id": "df2f8f9c-23cf-4001-a1d1-834f0ff66436", "node_type": "dynamips", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/0", "port_number": 0, "short_name": "f0/0"}, {"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/1", "port_number": 1, "short_name": "f0/1"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/0", "port_number": 0, "short_name": "f1/0"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/0", "port_number": 0, "short_name": "f2/0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"auto_delete_disks": true, "aux": null, "clock_divisor": 8, "disk0": 0, "disk1": 0, "dynamips_id": 3, "exec_area": 64, "idlemax": 500, "idlepc": "0x60bf82e0", "idlesleep": 30, "image": "c3725-adventerprisek9-mz124-15.image", "image_md5sum": "1c950444f3261338c3d42e72a6ded980", "iomem": 5, "mac_addr": "c203.057a.0000", "mmap": true, "nvram": 256, "platform": "c3725", "private_config": "", "private_config_content": "", "ram": 128, "slot0": "GT96100-FE", "slot1": "NM-1FE-TX", "slot2": "NM-1FE-TX", "sparsemem": true, "startup_config": "configs/i3_startup-config.cfg", 
"startup_config_content": "!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n\\n!\\nversion 12.4\\nservice timestamps debug datetime msec\\nservice timestamps log datetime msec\\nno service password-encryption\\n!\\nhostname C3725-1\\n!\\nboot-start-marker\\nboot-end-marker\\n!\\n!\\nno aaa new-model\\nmemory-size iomem 5\\nno ip icmp rate-limit unreachable\\nip cef\\n!\\n!\\n!\\n!\\nno ip domain lookup\\n!\\nmultilink bundle-name authenticated\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\narchive\\n log config\\n hidekeys\\n! \\n!\\n!\\n!\\nip tcp synwait-time 5\\n!\\n!\\n!\\n!\\ninterface FastEthernet0/0\\n ip address 10.0.0.1 255.255.255.252\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet0/1\\n ip address 10.0.0.9 255.255.255.252\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet1/0\\n no ip address\\n shutdown\\n duplex auto\\n speed auto\\n!\\ninterface FastEthernet2/0\\n ip address 192.168.10.254 255.255.255.0\\n duplex auto\\n speed auto\\n!\\nip forward-protocol nd\\n!\\n!\\nno ip http server\\nno ip http secure-server\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\ncontrol-plane\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\nalias interface shit shutdown\\n!\\nline con 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\nline aux 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\nline vty 0 4\\n login\\n!\\n!\\nend\\n", "system_id": "FTX0945W0MY", "wic0": null, "wic1": null, "wic2": null}, "status": "stopped", "symbol": ":/symbols/router.svg", "width": 66, "x": -333, "y": -172, "z": 1}',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/nodes/7e6c9433-dbab-4b34-a731-2b43a7f77fef': '{"command_line": null, "compute_id": "local", "console": 5004, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 45, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "C7200-2", "x": 5, "y": 23}, "name": "C7200-2", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/7e6c9433-dbab-4b34-a731-2b43a7f77fef", "node_id": "7e6c9433-dbab-4b34-a731-2b43a7f77fef", "node_type": "dynamips", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/0", "port_number": 0, "short_name": "f0/0"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/0", "port_number": 0, "short_name": "f1/0"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/1", "port_number": 1, "short_name": "f1/1"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/0", "port_number": 0, "short_name": "f2/0"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/1", "port_number": 1, "short_name": "f2/1"}, {"adapter_number": 3, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "GigabitEthernet3/0", "port_number": 0, "short_name": "g3/0"}, {"adapter_number": 4, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "GigabitEthernet4/0", "port_number": 0, "short_name": "g4/0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"auto_delete_disks": true, "aux": null, "clock_divisor": 4, "disk0": 0, "disk1": 0, 
"dynamips_id": 2, "exec_area": 64, "idlemax": 500, "idlepc": "0x63184bc8", "idlesleep": 30, "image": "c7200-advipservicesk9-mz.152-4.S5.image", "image_md5sum": "cbbbea66a253f1dac0fcf81274dc778d", "mac_addr": "ca02.0579.0000", "midplane": "vxr", "mmap": true, "npe": "npe-400", "nvram": 512, "platform": "c7200", "power_supplies": [1, 1], "private_config": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/7e6c9433-dbab-4b34-a731-2b43a7f77fef/configs/i2_private-config.cfg", "private_config_content": "\\nend\\n", "ram": 512, "sensors": [22, 22, 22, 22], "slot0": "C7200-IO-FE", "slot1": "PA-2FE-TX", "slot2": "PA-2FE-TX", "slot3": "PA-GE", "slot4": "PA-GE", "slot5": null, "slot6": null, "sparsemem": true, "startup_config": "configs/i2_startup-config.cfg", "startup_config_content": "!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n\\n!\\n! Last configuration change at 13:40:30 UTC Wed Aug 2 2017\\n!\\nversion 15.2\\nservice timestamps debug datetime msec\\nservice timestamps log datetime msec\\n!\\nhostname C7200-2\\n!\\nboot-start-marker\\nboot-end-marker\\n!\\n!\\n!\\nno aaa new-model\\nno ip icmp rate-limit unreachable\\nip cef\\n!\\n!\\n!\\n!\\n!\\n!\\nno ip domain lookup\\nno ipv6 cef\\n!\\n!\\nmultilink bundle-name authenticated\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\nip tcp synwait-time 5\\n! 
\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\ninterface FastEthernet0/0\\n ip address 10.0.0.10 255.255.255.252\\n duplex full\\n!\\ninterface FastEthernet1/0\\n ip address 10.0.0.13 255.255.255.252\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet1/1\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet2/0\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet2/1\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface GigabitEthernet3/0\\n no ip address\\n shutdown\\n negotiation auto\\n!\\ninterface GigabitEthernet4/0\\n no ip address\\n shutdown\\n negotiation auto\\n!\\nip forward-protocol nd\\n!\\n!\\nno ip http server\\nno ip http secure-server\\n!\\n!\\n!\\n!\\ncontrol-plane\\n!\\n!\\nline con 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\n stopbits 1\\nline aux 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\n stopbits 1\\nline vty 0 4\\n login\\n!\\n!\\nend\\n", "system_id": "FTX0945W0MY"}, "status": "stopped", "symbol": ":/symbols/router.svg", "width": 66, "x": -333, "y": 128, "z": 1}',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/nodes/61c67710-3c63-4f0d-bc4c-9680593e1a19': '{"command_line": null, "compute_id": "local", "console": 5002, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 45, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "C7200-1", "x": 8, "y": 21}, "name": "C7200-1", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/61c67710-3c63-4f0d-bc4c-9680593e1a19", "node_id": "61c67710-3c63-4f0d-bc4c-9680593e1a19", "node_type": "dynamips", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/0", "port_number": 0, "short_name": "f0/0"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/0", "port_number": 0, "short_name": "f1/0"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/1", "port_number": 1, "short_name": "f1/1"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/0", "port_number": 0, "short_name": "f2/0"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/1", "port_number": 1, "short_name": "f2/1"}, {"adapter_number": 3, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "GigabitEthernet3/0", "port_number": 0, "short_name": "g3/0"}, {"adapter_number": 4, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "GigabitEthernet4/0", "port_number": 0, "short_name": "g4/0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"auto_delete_disks": true, "aux": null, "clock_divisor": 4, "disk0": 0, "disk1": 0, 
"dynamips_id": 1, "exec_area": 64, "idlemax": 500, "idlepc": "0x63184bc8", "idlesleep": 30, "image": "c7200-advipservicesk9-mz.152-4.S5.image", "image_md5sum": "cbbbea66a253f1dac0fcf81274dc778d", "mac_addr": "ca01.0578.0000", "midplane": "vxr", "mmap": true, "npe": "npe-400", "nvram": 512, "platform": "c7200", "power_supplies": [1, 1], "private_config": "/Users/maarten/GNS3/projects/Basic 4 Routers/project-files/dynamips/61c67710-3c63-4f0d-bc4c-9680593e1a19/configs/i1_private-config.cfg", "private_config_content": "\\nend\\n", "ram": 512, "sensors": [22, 22, 22, 22], "slot0": "C7200-IO-FE", "slot1": "PA-2FE-TX", "slot2": "PA-2FE-TX", "slot3": "PA-GE", "slot4": "PA-GE", "slot5": null, "slot6": null, "sparsemem": true, "startup_config": "configs/i1_startup-config.cfg", "startup_config_content": "!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n\\n!\\n! Last configuration change at 13:40:22 UTC Wed Aug 2 2017\\n!\\nversion 15.2\\nservice timestamps debug datetime msec\\nservice timestamps log datetime msec\\n!\\nhostname C7200-1\\n!\\nboot-start-marker\\nboot-end-marker\\n!\\n!\\n!\\nno aaa new-model\\nno ip icmp rate-limit unreachable\\nip cef\\n!\\n!\\n!\\n!\\n!\\n!\\nno ip domain lookup\\nno ipv6 cef\\n!\\n!\\nmultilink bundle-name authenticated\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\nip tcp synwait-time 5\\n! 
\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\ninterface FastEthernet0/0\\n ip address 10.0.0.2 255.255.255.252\\n duplex full\\n!\\ninterface FastEthernet1/0\\n ip address 10.0.0.5 255.255.255.252\\n speed auto\\n duplex full\\n!\\ninterface FastEthernet1/1\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet2/0\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet2/1\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface GigabitEthernet3/0\\n no ip address\\n shutdown\\n negotiation auto\\n!\\ninterface GigabitEthernet4/0\\n no ip address\\n shutdown\\n negotiation auto\\n!\\nip forward-protocol nd\\n!\\n!\\nno ip http server\\nno ip http secure-server\\n!\\n!\\n!\\n!\\ncontrol-plane\\n!\\n!\\nline con 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\n stopbits 1\\nline aux 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\n stopbits 1\\nline vty 0 4\\n login\\n!\\n!\\nend\\n", "system_id": "FTX0945W0MY"}, "status": "stopped", "symbol": ":/symbols/router.svg", "width": 66, "x": 117, "y": -173, "z": 1}',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/nodes/a73e4d0e-2572-4945-8777-2b64919eba95': '{"command_line": null, "compute_id": "local", "console": 5003, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 45, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "C3725-2", "x": 6, "y": 22}, "name": "C3725-2", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/a73e4d0e-2572-4945-8777-2b64919eba95", "node_id": "a73e4d0e-2572-4945-8777-2b64919eba95", "node_type": "dynamips", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/0", "port_number": 0, "short_name": "f0/0"}, {"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/1", "port_number": 1, "short_name": "f0/1"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/0", "port_number": 0, "short_name": "f1/0"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/0", "port_number": 0, "short_name": "f2/0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"auto_delete_disks": true, "aux": null, "clock_divisor": 8, "disk0": 0, "disk1": 0, "dynamips_id": 4, "exec_area": 64, "idlemax": 500, "idlepc": "0x60bf82e0", "idlesleep": 30, "image": "c3725-adventerprisek9-mz124-15.image", "image_md5sum": "1c950444f3261338c3d42e72a6ded980", "iomem": 5, "mac_addr": "c204.057b.0000", "mmap": true, "nvram": 256, "platform": "c3725", "private_config": "", "private_config_content": "", "ram": 128, "slot0": "GT96100-FE", "slot1": "NM-1FE-TX", "slot2": "NM-1FE-TX", "sparsemem": true, "startup_config": "configs/i4_startup-config.cfg", 
"startup_config_content": "!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n\\n!\\nversion 12.4\\nservice timestamps debug datetime msec\\nservice timestamps log datetime msec\\nno service password-encryption\\n!\\nhostname C3725-2\\n!\\nboot-start-marker\\nboot-end-marker\\n!\\n!\\nno aaa new-model\\nmemory-size iomem 5\\nno ip icmp rate-limit unreachable\\nip cef\\n!\\n!\\n!\\n!\\nno ip domain lookup\\n!\\nmultilink bundle-name authenticated\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\narchive\\n log config\\n hidekeys\\n! \\n!\\n!\\n!\\nip tcp synwait-time 5\\n!\\n!\\n!\\n!\\ninterface FastEthernet0/0\\n no ip address\\n shutdown\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet0/1\\n no ip address\\n shutdown\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet1/0\\n no ip address\\n shutdown\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet2/0\\n no ip address\\n speed 100\\n full-duplex\\n!\\nip forward-protocol nd\\n!\\n!\\nno ip http server\\nno ip http secure-server\\n!\\nno cdp log mismatch duplex\\n!\\n!\\n!\\n!\\n!\\n!\\ncontrol-plane\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\nline con 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\nline aux 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\nline vty 0 4\\n login\\n!\\n!\\nend\\n", "system_id": "FTX0945W0MY", "wic0": null, "wic1": null, "wic2": null}, "status": "stopped", "symbol": ":/symbols/router.svg", "width": 66, "x": 116, "y": 129, "z": 1}',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/nodes/6f58d4cf-2aea-40e4-9d1b-e5bf20f3d51a': '{"command_line": "", "compute_id": "local", "console": 5006, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 59, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "PC2", "x": 18, "y": -25}, "name": "PC2", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/vpcs/6f58d4cf-2aea-40e4-9d1b-e5bf20f3d51a", "node_id": "6f58d4cf-2aea-40e4-9d1b-e5bf20f3d51a", "node_type": "vpcs", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "Ethernet0", "port_number": 0, "short_name": "e0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"startup_script": "set pcname PC2\\nip 192.168.20.1 192.168.20.254 24\\n", "startup_script_path": "startup.vpc"}, "status": "stopped", "symbol": ":/symbols/vpcs_guest.svg", "width": 65, "x": 342, "y": 120, "z": 1}',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/nodes/be1673f7-b534-4263-bf83-ac05eb618360': '{"command_line": "", "compute_id": "local", "console": 5005, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 59, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "PC1", "x": 18, "y": -25}, "name": "PC1", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/vpcs/be1673f7-b534-4263-bf83-ac05eb618360", "node_id": "be1673f7-b534-4263-bf83-ac05eb618360", "node_type": "vpcs", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "Ethernet0", "port_number": 0, "short_name": "e0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"startup_script": "set pcname PC1\\nip 192.168.10.1 192.168.10.254 24\\n", "startup_script_path": "startup.vpc"}, "status": "stopped", "symbol": ":/symbols/vpcs_guest.svg", "width": 65, "x": -482, "y": -179, "z": 1}',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/nodes': '[{"command_line": null, "compute_id": "local", "console": 5003, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 45, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "C3725-2", "x": 6, "y": 22}, "name": "C3725-2", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/a73e4d0e-2572-4945-8777-2b64919eba95", "node_id": "a73e4d0e-2572-4945-8777-2b64919eba95", "node_type": "dynamips", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/0", "port_number": 0, "short_name": "f0/0"}, {"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/1", "port_number": 1, "short_name": "f0/1"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/0", "port_number": 0, "short_name": "f1/0"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/0", "port_number": 0, "short_name": "f2/0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"auto_delete_disks": true, "aux": null, "clock_divisor": 8, "disk0": 0, "disk1": 0, "dynamips_id": 4, "exec_area": 64, "idlemax": 500, "idlepc": "0x60bf82e0", "idlesleep": 30, "image": "c3725-adventerprisek9-mz124-15.image", "image_md5sum": "1c950444f3261338c3d42e72a6ded980", "iomem": 5, "mac_addr": "c204.057b.0000", "mmap": true, "nvram": 256, "platform": "c3725", "private_config": "", "private_config_content": "", "ram": 128, "slot0": "GT96100-FE", "slot1": "NM-1FE-TX", "slot2": "NM-1FE-TX", "sparsemem": true, "startup_config": "configs/i4_startup-config.cfg", "startup_config_content": 
"!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n\\n!\\nversion 12.4\\nservice timestamps debug datetime msec\\nservice timestamps log datetime msec\\nno service password-encryption\\n!\\nhostname C3725-2\\n!\\nboot-start-marker\\nboot-end-marker\\n!\\n!\\nno aaa new-model\\nmemory-size iomem 5\\nno ip icmp rate-limit unreachable\\nip cef\\n!\\n!\\n!\\n!\\nno ip domain lookup\\n!\\nmultilink bundle-name authenticated\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\narchive\\n log config\\n hidekeys\\n! \\n!\\n!\\n!\\nip tcp synwait-time 5\\n!\\n!\\n!\\n!\\ninterface FastEthernet0/0\\n no ip address\\n shutdown\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet0/1\\n no ip address\\n shutdown\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet1/0\\n no ip address\\n shutdown\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet2/0\\n no ip address\\n speed 100\\n full-duplex\\n!\\nip forward-protocol nd\\n!\\n!\\nno ip http server\\nno ip http secure-server\\n!\\nno cdp log mismatch duplex\\n!\\n!\\n!\\n!\\n!\\n!\\ncontrol-plane\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\nline con 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\nline aux 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\nline vty 0 4\\n login\\n!\\n!\\nend\\n", "system_id": "FTX0945W0MY", "wic0": null, "wic1": null, "wic2": null}, "status": "stopped", "symbol": ":/symbols/router.svg", "width": 66, "x": 116, "y": 129, "z": 1}, {"command_line": null, "compute_id": "local", "console": 5002, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 45, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "C7200-1", "x": 8, "y": 21}, "name": "C7200-1", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/61c67710-3c63-4f0d-bc4c-9680593e1a19", "node_id": 
"61c67710-3c63-4f0d-bc4c-9680593e1a19", "node_type": "dynamips", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/0", "port_number": 0, "short_name": "f0/0"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/0", "port_number": 0, "short_name": "f1/0"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/1", "port_number": 1, "short_name": "f1/1"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/0", "port_number": 0, "short_name": "f2/0"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/1", "port_number": 1, "short_name": "f2/1"}, {"adapter_number": 3, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "GigabitEthernet3/0", "port_number": 0, "short_name": "g3/0"}, {"adapter_number": 4, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "GigabitEthernet4/0", "port_number": 0, "short_name": "g4/0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"auto_delete_disks": true, "aux": null, "clock_divisor": 4, "disk0": 0, "disk1": 0, "dynamips_id": 1, "exec_area": 64, "idlemax": 500, "idlepc": "0x63184bc8", "idlesleep": 30, "image": "c7200-advipservicesk9-mz.152-4.S5.image", "image_md5sum": "cbbbea66a253f1dac0fcf81274dc778d", "mac_addr": "ca01.0578.0000", "midplane": "vxr", "mmap": true, "npe": "npe-400", "nvram": 512, "platform": "c7200", "power_supplies": [1, 1], "private_config": "/Users/maarten/GNS3/projects/Basic 4 Routers/project-files/dynamips/61c67710-3c63-4f0d-bc4c-9680593e1a19/configs/i1_private-config.cfg", "private_config_content": "\\nend\\n", "ram": 512, "sensors": [22, 22, 22, 
22], "slot0": "C7200-IO-FE", "slot1": "PA-2FE-TX", "slot2": "PA-2FE-TX", "slot3": "PA-GE", "slot4": "PA-GE", "slot5": null, "slot6": null, "sparsemem": true, "startup_config": "configs/i1_startup-config.cfg", "startup_config_content": "!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n\\n!\\n! Last configuration change at 13:40:22 UTC Wed Aug 2 2017\\n!\\nversion 15.2\\nservice timestamps debug datetime msec\\nservice timestamps log datetime msec\\n!\\nhostname C7200-1\\n!\\nboot-start-marker\\nboot-end-marker\\n!\\n!\\n!\\nno aaa new-model\\nno ip icmp rate-limit unreachable\\nip cef\\n!\\n!\\n!\\n!\\n!\\n!\\nno ip domain lookup\\nno ipv6 cef\\n!\\n!\\nmultilink bundle-name authenticated\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\nip tcp synwait-time 5\\n! \\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\ninterface FastEthernet0/0\\n ip address 10.0.0.2 255.255.255.252\\n duplex full\\n!\\ninterface FastEthernet1/0\\n ip address 10.0.0.5 255.255.255.252\\n speed auto\\n duplex full\\n!\\ninterface FastEthernet1/1\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet2/0\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet2/1\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface GigabitEthernet3/0\\n no ip address\\n shutdown\\n negotiation auto\\n!\\ninterface GigabitEthernet4/0\\n no ip address\\n shutdown\\n negotiation auto\\n!\\nip forward-protocol nd\\n!\\n!\\nno ip http server\\nno ip http secure-server\\n!\\n!\\n!\\n!\\ncontrol-plane\\n!\\n!\\nline con 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\n stopbits 1\\nline aux 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\n stopbits 1\\nline vty 0 4\\n login\\n!\\n!\\nend\\n", "system_id": "FTX0945W0MY"}, "status": "stopped", "symbol": ":/symbols/router.svg", "width": 66, "x": 117, "y": -173, "z": 1}, {"command_line": "", "compute_id": "local", "console": 5005, "console_host": 
"127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 59, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "PC1", "x": 18, "y": -25}, "name": "PC1", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/vpcs/be1673f7-b534-4263-bf83-ac05eb618360", "node_id": "be1673f7-b534-4263-bf83-ac05eb618360", "node_type": "vpcs", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "Ethernet0", "port_number": 0, "short_name": "e0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"startup_script": "set pcname PC1\\nip 192.168.10.1 192.168.10.254 24\\n", "startup_script_path": "startup.vpc"}, "status": "stopped", "symbol": ":/symbols/vpcs_guest.svg", "width": 65, "x": -482, "y": -179, "z": 1}, {"command_line": null, "compute_id": "local", "console": 5007, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 45, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "C3725-1", "x": 5, "y": 20}, "name": "C3725-1", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/df2f8f9c-23cf-4001-a1d1-834f0ff66436", "node_id": "df2f8f9c-23cf-4001-a1d1-834f0ff66436", "node_type": "dynamips", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/0", "port_number": 0, "short_name": "f0/0"}, {"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/1", "port_number": 1, "short_name": "f0/1"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": 
"FastEthernet1/0", "port_number": 0, "short_name": "f1/0"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/0", "port_number": 0, "short_name": "f2/0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"auto_delete_disks": true, "aux": null, "clock_divisor": 8, "disk0": 0, "disk1": 0, "dynamips_id": 3, "exec_area": 64, "idlemax": 500, "idlepc": "0x60bf82e0", "idlesleep": 30, "image": "c3725-adventerprisek9-mz124-15.image", "image_md5sum": "1c950444f3261338c3d42e72a6ded980", "iomem": 5, "mac_addr": "c203.057a.0000", "mmap": true, "nvram": 256, "platform": "c3725", "private_config": "", "private_config_content": "", "ram": 128, "slot0": "GT96100-FE", "slot1": "NM-1FE-TX", "slot2": "NM-1FE-TX", "sparsemem": true, "startup_config": "configs/i3_startup-config.cfg", "startup_config_content": "!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n\\n!\\nversion 12.4\\nservice timestamps debug datetime msec\\nservice timestamps log datetime msec\\nno service password-encryption\\n!\\nhostname C3725-1\\n!\\nboot-start-marker\\nboot-end-marker\\n!\\n!\\nno aaa new-model\\nmemory-size iomem 5\\nno ip icmp rate-limit unreachable\\nip cef\\n!\\n!\\n!\\n!\\nno ip domain lookup\\n!\\nmultilink bundle-name authenticated\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\narchive\\n log config\\n hidekeys\\n! 
\\n!\\n!\\n!\\nip tcp synwait-time 5\\n!\\n!\\n!\\n!\\ninterface FastEthernet0/0\\n ip address 10.0.0.1 255.255.255.252\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet0/1\\n ip address 10.0.0.9 255.255.255.252\\n speed 100\\n full-duplex\\n!\\ninterface FastEthernet1/0\\n no ip address\\n shutdown\\n duplex auto\\n speed auto\\n!\\ninterface FastEthernet2/0\\n ip address 192.168.10.254 255.255.255.0\\n duplex auto\\n speed auto\\n!\\nip forward-protocol nd\\n!\\n!\\nno ip http server\\nno ip http secure-server\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\ncontrol-plane\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\nalias interface shit shutdown\\n!\\nline con 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\nline aux 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\nline vty 0 4\\n login\\n!\\n!\\nend\\n", "system_id": "FTX0945W0MY", "wic0": null, "wic1": null, "wic2": null}, "status": "stopped", "symbol": ":/symbols/router.svg", "width": 66, "x": -333, "y": -172, "z": 1}, {"command_line": null, "compute_id": "local", "console": 5004, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 45, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "C7200-2", "x": 5, "y": 23}, "name": "C7200-2", "node_directory": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/7e6c9433-dbab-4b34-a731-2b43a7f77fef", "node_id": "7e6c9433-dbab-4b34-a731-2b43a7f77fef", "node_type": "dynamips", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet0/0", "port_number": 0, "short_name": "f0/0"}, {"adapter_number": 1, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/0", "port_number": 0, "short_name": "f1/0"}, {"adapter_number": 1, "data_link_types": {"Ethernet": 
"DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet1/1", "port_number": 1, "short_name": "f1/1"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/0", "port_number": 0, "short_name": "f2/0"}, {"adapter_number": 2, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "FastEthernet2/1", "port_number": 1, "short_name": "f2/1"}, {"adapter_number": 3, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "GigabitEthernet3/0", "port_number": 0, "short_name": "g3/0"}, {"adapter_number": 4, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "GigabitEthernet4/0", "port_number": 0, "short_name": "g4/0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"auto_delete_disks": true, "aux": null, "clock_divisor": 4, "disk0": 0, "disk1": 0, "dynamips_id": 2, "exec_area": 64, "idlemax": 500, "idlepc": "0x63184bc8", "idlesleep": 30, "image": "c7200-advipservicesk9-mz.152-4.S5.image", "image_md5sum": "cbbbea66a253f1dac0fcf81274dc778d", "mac_addr": "ca02.0579.0000", "midplane": "vxr", "mmap": true, "npe": "npe-400", "nvram": 512, "platform": "c7200", "power_supplies": [1, 1], "private_config": "/Users/maarten/GNS3/Projects/Basic 4 Routers/project-files/dynamips/7e6c9433-dbab-4b34-a731-2b43a7f77fef/configs/i2_private-config.cfg", "private_config_content": "\\nend\\n", "ram": 512, "sensors": [22, 22, 22, 22], "slot0": "C7200-IO-FE", "slot1": "PA-2FE-TX", "slot2": "PA-2FE-TX", "slot3": "PA-GE", "slot4": "PA-GE", "slot5": null, "slot6": null, "sparsemem": true, "startup_config": "configs/i2_startup-config.cfg", "startup_config_content": "!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n\\n!\\n! 
Last configuration change at 13:40:30 UTC Wed Aug 2 2017\\n!\\nversion 15.2\\nservice timestamps debug datetime msec\\nservice timestamps log datetime msec\\n!\\nhostname C7200-2\\n!\\nboot-start-marker\\nboot-end-marker\\n!\\n!\\n!\\nno aaa new-model\\nno ip icmp rate-limit unreachable\\nip cef\\n!\\n!\\n!\\n!\\n!\\n!\\nno ip domain lookup\\nno ipv6 cef\\n!\\n!\\nmultilink bundle-name authenticated\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\nip tcp synwait-time 5\\n! \\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\n!\\ninterface FastEthernet0/0\\n ip address 10.0.0.10 255.255.255.252\\n duplex full\\n!\\ninterface FastEthernet1/0\\n ip address 10.0.0.13 255.255.255.252\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet1/1\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet2/0\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface FastEthernet2/1\\n no ip address\\n shutdown\\n speed auto\\n duplex auto\\n!\\ninterface GigabitEthernet3/0\\n no ip address\\n shutdown\\n negotiation auto\\n!\\ninterface GigabitEthernet4/0\\n no ip address\\n shutdown\\n negotiation auto\\n!\\nip forward-protocol nd\\n!\\n!\\nno ip http server\\nno ip http secure-server\\n!\\n!\\n!\\n!\\ncontrol-plane\\n!\\n!\\nline con 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\n stopbits 1\\nline aux 0\\n exec-timeout 0 0\\n privilege level 15\\n logging synchronous\\n stopbits 1\\nline vty 0 4\\n login\\n!\\n!\\nend\\n", "system_id": "FTX0945W0MY"}, "status": "stopped", "symbol": ":/symbols/router.svg", "width": 66, "x": -333, "y": 128, "z": 1}, {"command_line": "", "compute_id": "local", "console": 5006, "console_host": "127.0.0.1", "console_type": "telnet", "first_port_name": null, "height": 59, "label": {"rotation": 0, "style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;", "text": "PC2", "x": 18, "y": -25}, "name": "PC2", "node_directory": "/Users/maarten/GNS3/Projects/Basic 
4 Routers/project-files/vpcs/6f58d4cf-2aea-40e4-9d1b-e5bf20f3d51a", "node_id": "6f58d4cf-2aea-40e4-9d1b-e5bf20f3d51a", "node_type": "vpcs", "port_name_format": "Ethernet{0}", "port_segment_size": 0, "ports": [{"adapter_number": 0, "data_link_types": {"Ethernet": "DLT_EN10MB"}, "link_type": "ethernet", "name": "Ethernet0", "port_number": 0, "short_name": "e0"}], "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "properties": {"startup_script": "set pcname PC2\\nip 192.168.20.1 192.168.20.254 24\\n", "startup_script_path": "startup.vpc"}, "status": "stopped", "symbol": ":/symbols/vpcs_guest.svg", "width": 65, "x": 342, "y": 120, "z": 1}]',
'/projects/a1ea2a19-2980-41aa-81ab-f1c80be25ca7/snapshots': '[{"created_at": 1502551604, "name": "Test Snapshot", "project_id": "a1ea2a19-2980-41aa-81ab-f1c80be25ca7", "snapshot_id": "de81908c-94f0-449d-bb03-79b82b0adf05"}]',
'/projects/5daa48ff-dbd6-407c-a3c6-645e743f233a': '{"auto_close": true, "auto_open": false, "auto_start": false, "filename": "Basic Cloud Connection.gns3", "name": "Basic Cloud Connection", "path": "/Users/maarten/GNS3/projects/Basic Cloud Connection", "project_id": "5daa48ff-dbd6-407c-a3c6-645e743f233a", "scene_height": 1000, "scene_width": 2000, "status": "closed"}',
'/projects/5daa48ff-dbd6-407c-a3c6-645e743f233a/drawings': '[]',
'/projects/5daa48ff-dbd6-407c-a3c6-645e743f233a/links': '[]',
'/projects/5daa48ff-dbd6-407c-a3c6-645e743f233a/nodes': '[]',
'/projects/5daa48ff-dbd6-407c-a3c6-645e743f233a/snapshots': '[]'}
| 1,790.115385
| 17,854
| 0.669703
| 7,086
| 46,543
| 4.285916
| 0.070703
| 0.029108
| 0.035759
| 0.04083
| 0.971683
| 0.962825
| 0.952519
| 0.946065
| 0.944682
| 0.933948
| 0
| 0.118661
| 0.081086
| 46,543
| 25
| 17,855
| 1,861.72
| 0.591433
| 0.000773
| 0
| 0
| 0
| 1
| 0.994279
| 0.293017
| 0
| 0
| 0.00172
| 0
| 0
| 1
| 0
| false
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
3c2938ff69b9daf464b3f0d83e706655ebcfdfa3
| 1,233
|
py
|
Python
|
src/arch/x86/isa/insts/simd512/integer/arithmetic/vpaddd.py
|
jyhuang91/gem5-avx
|
f988da46080f8db49beb39e20af437219f3aa4cb
|
[
"BSD-3-Clause"
] | 2
|
2021-01-15T17:32:18.000Z
|
2021-12-21T02:53:58.000Z
|
src/arch/x86/isa/insts/simd512/integer/arithmetic/vpaddd.py
|
jyhuang91/gem5-avx
|
f988da46080f8db49beb39e20af437219f3aa4cb
|
[
"BSD-3-Clause"
] | 3
|
2021-03-26T20:33:59.000Z
|
2022-01-24T22:54:03.000Z
|
src/arch/x86/isa/insts/simd512/integer/arithmetic/vpaddd.py
|
jyhuang91/gem5-avx
|
f988da46080f8db49beb39e20af437219f3aa4cb
|
[
"BSD-3-Clause"
] | 3
|
2021-03-27T16:36:19.000Z
|
2022-03-28T18:32:57.000Z
|
microcode = '''
def macroop VPADDD_XMM_XMM {
vaddi dest=xmm0, src1=xmm0v, src2=xmm0m, size=4, VL=16
};
def macroop VPADDD_XMM_M {
ldfp128 ufp1, seg, sib, "DISPLACEMENT + 0", dataSize=16
vaddi dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=16
};
def macroop VPADDD_XMM_P {
rdip t7
ldfp128 ufp1, seg, riprel, "DISPLACEMENT + 0", dataSize=16
vaddi dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=16
};
def macroop VPADDD_YMM_YMM {
vaddi dest=xmm0, src1=xmm0v, src2=xmm0m, size=4, VL=32
};
def macroop VPADDD_YMM_M {
ldfp256 ufp1, seg, sib, "DISPLACEMENT + 0", dataSize=32
vaddi dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=32
};
def macroop VPADDD_YMM_P {
rdip t7
ldfp256 ufp1, seg, riprel, "DISPLACEMENT + 0", dataSize=32
vaddi dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=32
};
def macroop VPADDD_ZMM_ZMM {
vaddi dest=xmm0, src1=xmm0v, src2=xmm0m, size=4, VL=64
};
def macroop VPADDD_ZMM_M {
ldfp512 ufp1, seg, sib, "DISPLACEMENT + 0", dataSize=64
vaddi dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=64
};
def macroop VPADDD_ZMM_P {
rdip t7
ldfp512 ufp1, seg, riprel, "DISPLACEMENT + 0", dataSize=64
vaddi dest=xmm0, src1=xmm0v, src2=ufp1, size=4, VL=64
};
'''
| 26.804348
| 62
| 0.676399
| 199
| 1,233
| 4.100503
| 0.170854
| 0.110294
| 0.176471
| 0.1875
| 0.866422
| 0.866422
| 0.781863
| 0.781863
| 0.67402
| 0.67402
| 0
| 0.117352
| 0.191403
| 1,233
| 46
| 63
| 26.804348
| 0.701103
| 0
| 0
| 0.473684
| 0
| 0
| 0.984603
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3c4b12a3566fd47e0d5d177c7f8d70f45bdc6664
| 177
|
py
|
Python
|
frappe/patches/v5_0/expire_old_scheduler_logs.py
|
khatrijitendra/lumalock-frappe
|
b3864278dad21dde5c53604be65aa56c79e5d909
|
[
"MIT"
] | null | null | null |
frappe/patches/v5_0/expire_old_scheduler_logs.py
|
khatrijitendra/lumalock-frappe
|
b3864278dad21dde5c53604be65aa56c79e5d909
|
[
"MIT"
] | 7
|
2020-03-24T17:07:47.000Z
|
2022-03-11T23:49:25.000Z
|
frappe/patches/v5_0/expire_old_scheduler_logs.py
|
khatrijitendra/lumalock-frappe
|
b3864278dad21dde5c53604be65aa56c79e5d909
|
[
"MIT"
] | 5
|
2016-11-12T12:14:58.000Z
|
2018-03-21T15:45:45.000Z
|
import frappe
def execute():
frappe.reload_doctype("Scheduler Log")
from frappe.core.doctype.scheduler_log.scheduler_log import set_old_logs_as_seen
set_old_logs_as_seen()
| 22.125
| 81
| 0.830508
| 28
| 177
| 4.857143
| 0.535714
| 0.264706
| 0.279412
| 0.176471
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090395
| 177
| 7
| 82
| 25.285714
| 0.844721
| 0
| 0
| 0
| 0
| 0
| 0.073446
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3c5b84cfe20a3d0b1599c213056b8a79175c436d
| 131
|
py
|
Python
|
pymontecarlo/options/__init__.py
|
pymontecarlo/pymontecarlo
|
87050041724feb17f1ccff5794e9830c3209244e
|
[
"Apache-2.0"
] | 5
|
2018-04-10T07:15:06.000Z
|
2021-07-01T15:40:29.000Z
|
pymontecarlo/options/__init__.py
|
pymontecarlo/pymontecarlo
|
87050041724feb17f1ccff5794e9830c3209244e
|
[
"Apache-2.0"
] | 73
|
2015-09-04T09:48:29.000Z
|
2022-01-03T17:49:01.000Z
|
pymontecarlo/options/__init__.py
|
pymontecarlo/pymontecarlo
|
87050041724feb17f1ccff5794e9830c3209244e
|
[
"Apache-2.0"
] | 4
|
2016-05-17T12:57:20.000Z
|
2021-01-31T10:55:24.000Z
|
from pymontecarlo.options.material import *
from pymontecarlo.options.options import *
from pymontecarlo.options.particle import *
| 32.75
| 43
| 0.839695
| 15
| 131
| 7.333333
| 0.4
| 0.436364
| 0.627273
| 0.527273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091603
| 131
| 3
| 44
| 43.666667
| 0.92437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3c77174825cd697ca71e60879f632e2153beaf30
| 117
|
py
|
Python
|
chirun/plastex/makecourse/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 5
|
2021-12-06T15:57:24.000Z
|
2022-01-24T20:34:00.000Z
|
chirun/plastex/makecourse/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 38
|
2021-12-09T13:16:46.000Z
|
2022-03-30T11:42:13.000Z
|
chirun/plastex/makecourse/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 1
|
2022-01-17T17:41:35.000Z
|
2022-01-17T17:41:35.000Z
|
from plasTeX.Packages.embed import * # noqa: F401, F403
from plasTeX.Packages.hyperref import * # noqa: F401, F403
| 39
| 59
| 0.74359
| 16
| 117
| 5.4375
| 0.5625
| 0.252874
| 0.436782
| 0.413793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 0.153846
| 117
| 2
| 60
| 58.5
| 0.757576
| 0.282051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
3c8d7288eb491283a49067c0bd6fe2072edeb5c7
| 244
|
py
|
Python
|
tests/model/__init__.py
|
pacman82/gpt-neox
|
77f137563d7ae370d05744badd2decafe4a3dbcd
|
[
"Apache-2.0"
] | null | null | null |
tests/model/__init__.py
|
pacman82/gpt-neox
|
77f137563d7ae370d05744badd2decafe4a3dbcd
|
[
"Apache-2.0"
] | null | null | null |
tests/model/__init__.py
|
pacman82/gpt-neox
|
77f137563d7ae370d05744badd2decafe4a3dbcd
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests concerning the GPT2Model class
"""
from .test_model_initialization import TestModelInitialization
from .test_model_checkpoint import TestModelCheckpoint
#from .test_model_initialization_pipeline import TestModelInitializationPipeline
| 34.857143
| 80
| 0.881148
| 24
| 244
| 8.666667
| 0.625
| 0.115385
| 0.1875
| 0.259615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004444
| 0.077869
| 244
| 7
| 80
| 34.857143
| 0.92
| 0.47541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5938e6a5ea306752315ee9e4c21ff161616df933
| 40
|
py
|
Python
|
src/buildstream/testing/_utils/__init__.py
|
doraskayo/buildstream
|
1c72d4342ae7df360808de22c5e49f55dbb6bec6
|
[
"Apache-2.0"
] | null | null | null |
src/buildstream/testing/_utils/__init__.py
|
doraskayo/buildstream
|
1c72d4342ae7df360808de22c5e49f55dbb6bec6
|
[
"Apache-2.0"
] | null | null | null |
src/buildstream/testing/_utils/__init__.py
|
doraskayo/buildstream
|
1c72d4342ae7df360808de22c5e49f55dbb6bec6
|
[
"Apache-2.0"
] | null | null | null |
from .junction import generate_junction
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3cd80827014afb9da410e57079445e485050c846
| 39,360
|
py
|
Python
|
graphpype/peak_labelled_mask.py
|
EtienneCmb/graphpype
|
f19fdcd8e98660625a53c733ff8e44d60c31bd68
|
[
"BSD-3-Clause"
] | null | null | null |
graphpype/peak_labelled_mask.py
|
EtienneCmb/graphpype
|
f19fdcd8e98660625a53c733ff8e44d60c31bd68
|
[
"BSD-3-Clause"
] | null | null | null |
graphpype/peak_labelled_mask.py
|
EtienneCmb/graphpype
|
f19fdcd8e98660625a53c733ff8e44d60c31bd68
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Compute ROI labeled mask from spm contrast image or images
"""
import sys, os
#sys.path.append('../irm_analysis')
#from define_variables import *
from graphpype.labeled_mask import compute_recombined_HO_template
from graphpype.utils_dtype_coord import *
import glob
from xml.dom import minidom
import os
import numpy as np
from nibabel import load, save
import nipy.labs.spatial_models.mroi as mroi
from nipy.labs.spatial_models.discrete_domain import grid_domain_from_image
import nipy.labs.spatial_models.hroi as hroi
import nipy.labs.statistical_mapping as stat_map
import itertools as iter
import scipy.spatial.distance as dist
########################################### Activation peaks ROI template (computed once before the pipeline) ################################################
### scan toutes les possibilités dans le cube, et ne retourne que les ROIs dont le nombre de voxels dans le voisinage appartienant à AAL et au mask est supérieur à min_nb_voxels_in_neigh
def return_indexed_mask_neigh_within_binary_template(peak_position,neighbourhood,resliced_template_template_data,orig_peak_coords_dt,min_nb_voxels_in_neigh):
peak_x,peak_y,peak_z = np.array(peak_position,dtype = 'int')
neigh_range = list(range(-neighbourhood,neighbourhood+1))
list_neigh_coords = []
peak_template_roi_index = resliced_template_template_data[peak_x,peak_y,peak_z]
print(peak_template_roi_index)
#print "template index = " + str(peak_template_roi_index)
count_neigh_in_orig_mask = 0
if peak_template_roi_index != 0:
for relative_coord in iter.product(neigh_range, repeat=3):
neigh_x,neigh_y,neigh_z = peak_position + relative_coord
neigh_coord_dt = convert_np_coords_to_coords_dt(np.array([[neigh_x,neigh_y,neigh_z]]))
#neigh_coord_dt = np.array([(neigh_x,neigh_y,neigh_z), ], dtype = coord_dt)
neigh_template_roi_index = resliced_template_template_data[neigh_x,neigh_y,neigh_z]
#print type(orig_peak_coords_dt),orig_peak_coords_dt.dtype,orig_peak_coords_dt.shape
#if neigh_template_roi_index == peak_template_roi_index and np.in1d(neigh_coord_dt,orig_peak_coords_dt):
if neigh_template_roi_index != 0 and neigh_coord_dt in orig_peak_coords_dt:
list_neigh_coords.append(np.array([neigh_x,neigh_y,neigh_z],dtype = 'int16'))
count_neigh_in_orig_mask = count_neigh_in_orig_mask +1
print(list_neigh_coords)
if min_nb_voxels_in_neigh <= len(list_neigh_coords):
return list_neigh_coords,peak_template_roi_index
return [],0
def return_indexed_mask_cube_size_within_binary_template(peak_position,cube_size,resliced_template_template_data,orig_peak_coords_dt,min_nb_voxels_in_neigh):
peak_x,peak_y,peak_z = np.array(peak_position,dtype = 'int')
list_neigh_coords = []
peak_template_roi_index = resliced_template_template_data[peak_x,peak_y,peak_z]
print(peak_template_roi_index)
#print "template index = " + str(peak_template_roi_index)
count_neigh_in_orig_mask = 0
if peak_template_roi_index != 0:
for relative_coord in iter.product(list(range(cube_size)), repeat=3):
neigh_x,neigh_y,neigh_z = peak_position + relative_coord
neigh_coord_dt = convert_np_coords_to_coords_dt(np.array([[neigh_x,neigh_y,neigh_z]]))
#neigh_coord_dt = np.array([(neigh_x,neigh_y,neigh_z), ], dtype = coord_dt)
neigh_template_roi_index = resliced_template_template_data[neigh_x,neigh_y,neigh_z]
#print type(orig_peak_coords_dt),orig_peak_coords_dt.dtype,orig_peak_coords_dt.shape
#if neigh_template_roi_index == peak_template_roi_index and np.in1d(neigh_coord_dt,orig_peak_coords_dt):
if neigh_template_roi_index != 0 and neigh_coord_dt in orig_peak_coords_dt:
list_neigh_coords.append(np.array([neigh_x,neigh_y,neigh_z],dtype = 'int16'))
count_neigh_in_orig_mask = count_neigh_in_orig_mask +1
print(list_neigh_coords)
0/0
if min_nb_voxels_in_neigh <= len(list_neigh_coords):
return list_neigh_coords,peak_template_roi_index
return [],0
def return_neigh_within_same_region(peak_position,neighbourhood,resliced_template_template_data,min_nb_voxels_in_neigh):
peak_x,peak_y,peak_z = np.array(peak_position,dtype = 'int')
neigh_range = list(range(-neighbourhood,neighbourhood+1))
list_neigh_coords = []
peak_template_roi_index = int(resliced_template_template_data[peak_x,peak_y,peak_z])
#print peak_template_roi_index
#print "template index = " + str(peak_template_roi_index)
count_neigh_in_orig_mask = 0
if peak_template_roi_index != 0:
for relative_coord in iter.product(neigh_range, repeat=3):
neigh_x,neigh_y,neigh_z = peak_position + relative_coord
neigh_coord_dt = convert_np_coords_to_coords_dt(np.array([[neigh_x,neigh_y,neigh_z]]))
#neigh_coord_dt = np.array([(neigh_x,neigh_y,neigh_z), ], dtype = coord_dt)
neigh_template_roi_index = resliced_template_template_data[neigh_x,neigh_y,neigh_z]
#print type(orig_peak_coords_dt),orig_peak_coords_dt.dtype,orig_peak_coords_dt.shape
if neigh_template_roi_index == peak_template_roi_index :
list_neigh_coords.append(np.array([neigh_x,neigh_y,neigh_z],dtype = 'int16'))
count_neigh_in_orig_mask = count_neigh_in_orig_mask +1
#print list_neigh_coords
if min_nb_voxels_in_neigh <= len(list_neigh_coords):
return list_neigh_coords,peak_template_roi_index
return [],0
def return_voxels_within_same_region(peak_position,ROI_cube_size,template_data,min_nb_voxels_in_neigh):
template_roi_index = int(template_data[peak_position[0],peak_position[1],peak_position[2]])
if template_roi_index != 0:
list_voxel_coords = []
for relative_coord in iter.product(list(range(ROI_cube_size)), repeat=3):
neigh_x,neigh_y,neigh_z = peak_position + relative_coord
if np.all(peak_position + relative_coord < np.array(template_data.shape)):
if template_data[neigh_x,neigh_y,neigh_z] == template_roi_index :
list_voxel_coords.append(np.array([neigh_x,neigh_y,neigh_z],dtype = 'int16'))
#list_voxel_coords = [[peak_position[0] + relative_coord[0],peak_position[1] + relative_coord[1],peak_position[2] + relative_coord[2]] for relative_coord in iter.product(range(ROI_cube_size), repeat=3) if np.all(peak_position + relative_coord < np.array(template_data.shape)) and template_data[peak_position[0] + relative_coord[0],peak_position[1] + relative_coord[1],peak_position[2] + relative_coord[2]] == template_roi_index]
if min_nb_voxels_in_neigh <= len(list_voxel_coords):
return list_voxel_coords,template_roi_index
return [],0
#########################################################################################################
def remove_close_peaks(list_orig_peak_coords,min_dist = 2.0 * np.sqrt(3)):
list_selected_peaks_coords = []
for orig_peak_coord in list_orig_peak_coords:
orig_peak_coord_np = np.array(orig_peak_coord)
if len(list_selected_peaks_coords) > 0:
selected_peaks_coords_np = np.array(list_selected_peaks_coords)
#orig_peak_coord_dt = convert_np_coords_to_coords_dt(orig_peak_coord)
#selected_peaks_coords_dt = convert_np_coords_to_coords_dt(list_selected_peaks_coords)
#print selected_peaks_coords_np.shape
#print orig_peak_coord_np.shape
dist_to_selected_peaks = dist.cdist(selected_peaks_coords_np,orig_peak_coord_np.reshape(1,3), 'euclidean')
#print dist_to_selected_peaks
min_dist_to_selected_peaks = np.amin(dist_to_selected_peaks,axis = 0)
if min_dist < min_dist_to_selected_peaks:
list_selected_peaks_coords.append(orig_peak_coord_np)
else:
list_selected_peaks_coords.append(orig_peak_coord)
print(len(list_selected_peaks_coords))
return list_selected_peaks_coords
def remove_close_peaks_neigh_in_binary_template(list_orig_peak_coords, template_data, min_dist):
    """Select peaks that are mutually distant AND fall inside a binary template,
    carving an indexed ROI neighbourhood around each accepted peak.

    Peaks are visited in order. A candidate is accepted when (a) its Euclidean
    distance to every previously accepted peak is strictly greater than
    `min_dist` (the first candidate skips this check), and (b)
    return_indexed_mask_neigh_within_binary_template reports a positive
    template ROI index for its neighbourhood.

    Parameters:
        list_orig_peak_coords: iterable of 3-element (i, j, k) voxel coordinates.
        template_data: 3-D array; its shape defines the output mask geometry.
        min_dist: exclusion radius between accepted peaks (voxel units).

    Returns:
        (list_selected_peaks_coords, indexed_mask_rois_data,
         list_selected_peaks_indexes) — the accepted peak coordinates, a 3-D
        int64 volume where background is -1 and each ROI's voxels carry its
        0-based acceptance order, and the original indexes of accepted peaks.

    Relies on module-level names not visible here: `ROI_cube_size` (ROI extent
    passed to the neighbourhood helper — presumably a cube half-size/size;
    TODO confirm) and the helpers `convert_np_coords_to_coords_dt` /
    `return_indexed_mask_neigh_within_binary_template`.
    """
    img_shape = template_data.shape
    # Background voxels are -1; ROI k is labelled k (0-based acceptance order).
    indexed_mask_rois_data = np.zeros(img_shape, dtype='int64') - 1
    print(indexed_mask_rois_data.shape)
    list_selected_peaks_coords = []
    orig_peak_coords_np = np.array(list_orig_peak_coords)
    print(type(orig_peak_coords_np), orig_peak_coords_np.dtype, orig_peak_coords_np.shape)
    list_selected_peaks_indexes = []
    # Structured/typed view of the coordinates expected by the neighbourhood helper.
    orig_peak_coords_dt = convert_np_coords_to_coords_dt(orig_peak_coords_np)
    print(type(orig_peak_coords_dt), orig_peak_coords_dt.dtype, orig_peak_coords_dt.shape)
    for i, orig_peak_coord in enumerate(list_orig_peak_coords):
        orig_peak_coord_np = np.array(orig_peak_coord)
        if len(list_selected_peaks_coords) > 0:
            selected_peaks_coords_np = np.array(list_selected_peaks_coords)
            # Distances from every already-accepted peak to this candidate.
            dist_to_selected_peaks = dist.cdist(selected_peaks_coords_np, orig_peak_coord_np.reshape(1, 3), 'euclidean')
            min_dist_to_selected_peaks = np.amin(dist_to_selected_peaks, axis=0)
            # Strict comparison: a candidate exactly at min_dist is rejected.
            if min_dist < min_dist_to_selected_peaks:
                list_neigh_coords, peak_template_roi_index = return_indexed_mask_neigh_within_binary_template(orig_peak_coord_np, ROI_cube_size, template_data, orig_peak_coords_dt)
                # A non-positive index means the peak neighbourhood misses the template.
                if peak_template_roi_index > 0:
                    neigh_coords = np.array(list_neigh_coords, dtype='int16')
                    # Label the whole neighbourhood with this ROI's 0-based order.
                    indexed_mask_rois_data[neigh_coords[:, 0], neigh_coords[:, 1], neigh_coords[:, 2]] = len(list_selected_peaks_coords)
                    list_selected_peaks_coords.append(orig_peak_coord_np)
                    list_selected_peaks_indexes.append(i)
                    print(len(list_selected_peaks_coords))
        else:
            # First candidate: no distance check, only the template-membership test.
            # NOTE(review): this branch duplicates the accept logic above verbatim.
            list_neigh_coords, peak_template_roi_index = return_indexed_mask_neigh_within_binary_template(orig_peak_coord_np, ROI_cube_size, template_data, orig_peak_coords_dt)
            if peak_template_roi_index > 0:
                neigh_coords = np.array(list_neigh_coords, dtype='int16')
                indexed_mask_rois_data[neigh_coords[:, 0], neigh_coords[:, 1], neigh_coords[:, 2]] = len(list_selected_peaks_coords)
                list_selected_peaks_coords.append(orig_peak_coord_np)
                list_selected_peaks_indexes.append(i)
                print(len(list_selected_peaks_coords))
    return list_selected_peaks_coords, indexed_mask_rois_data, list_selected_peaks_indexes
def remove_close_peaks_neigh_in_template(list_orig_peak_coords, template_data, template_labels, min_dist=3.0 * np.sqrt(3)):
    """Variant of remove_close_peaks_neigh_in_binary_template for a labelled
    (multi-valued) template: additionally records the template label of each
    accepted peak.

    Parameters:
        list_orig_peak_coords: iterable of 3-element (i, j, k) voxel coordinates.
        template_data: 3-D labelled template; its shape defines the mask geometry.
        template_labels: sequence of label names; indexed with the helper's
            returned ROI index minus one (template indices presumably 1-based;
            TODO confirm against return_indexed_mask_neigh_within_template).
        min_dist: exclusion radius between accepted peaks (default 3 * sqrt(3)).

    Returns:
        (list_selected_peaks_coords, indexed_mask_rois_data, label_rois) —
        accepted peak coordinates, a 3-D int64 volume (background -1, each ROI
        labelled by its 0-based acceptance order), and the template label of
        each accepted peak.

    Relies on module-level names not visible here: `ROI_cube_size` and the
    helpers `convert_np_coords_to_coords_dt` /
    `return_indexed_mask_neigh_within_template`.
    """
    img_shape = template_data.shape
    # Background voxels are -1; ROI k is labelled k (0-based acceptance order).
    indexed_mask_rois_data = np.zeros(img_shape, dtype='int64') - 1
    print(indexed_mask_rois_data.shape)
    label_rois = []
    list_selected_peaks_coords = []
    orig_peak_coords_np = np.array(list_orig_peak_coords)
    print(type(orig_peak_coords_np), orig_peak_coords_np.dtype, orig_peak_coords_np.shape)
    orig_peak_coords_dt = convert_np_coords_to_coords_dt(orig_peak_coords_np)
    print(type(orig_peak_coords_dt), orig_peak_coords_dt.dtype, orig_peak_coords_dt.shape)
    for orig_peak_coord in list_orig_peak_coords:
        orig_peak_coord_np = np.array(orig_peak_coord)
        if len(list_selected_peaks_coords) > 0:
            selected_peaks_coords_np = np.array(list_selected_peaks_coords)
            # Distances from every already-accepted peak to this candidate.
            dist_to_selected_peaks = dist.cdist(selected_peaks_coords_np, orig_peak_coord_np.reshape(1, 3), 'euclidean')
            min_dist_to_selected_peaks = np.amin(dist_to_selected_peaks, axis=0)
            if min_dist < min_dist_to_selected_peaks:
                list_neigh_coords, peak_template_roi_index = return_indexed_mask_neigh_within_template(orig_peak_coord_np, ROI_cube_size, template_data, orig_peak_coords_dt)
                # A non-positive index means the peak neighbourhood misses the template.
                if peak_template_roi_index > 0:
                    neigh_coords = np.array(list_neigh_coords, dtype='int16')
                    indexed_mask_rois_data[neigh_coords[:, 0], neigh_coords[:, 1], neigh_coords[:, 2]] = len(list_selected_peaks_coords)
                    label_rois.append(template_labels[peak_template_roi_index - 1])
                    list_selected_peaks_coords.append(orig_peak_coord_np)
        else:
            # First candidate: no distance check, only the template-membership test.
            # NOTE(review): this branch duplicates the accept logic above verbatim.
            list_neigh_coords, peak_template_roi_index = return_indexed_mask_neigh_within_template(orig_peak_coord_np, ROI_cube_size, template_data, orig_peak_coords_dt)
            if peak_template_roi_index > 0:
                neigh_coords = np.array(list_neigh_coords, dtype='int16')
                indexed_mask_rois_data[neigh_coords[:, 0], neigh_coords[:, 1], neigh_coords[:, 2]] = len(list_selected_peaks_coords)
                label_rois.append(template_labels[peak_template_roi_index - 1])
                list_selected_peaks_coords.append(orig_peak_coord_np)
                print(len(list_selected_peaks_coords))
    return list_selected_peaks_coords, indexed_mask_rois_data, label_rois
def compute_labelled_mask_from_HO_all_signif_contrasts():
    """Build one labelled ROI mask per significant SPM contrast image.

    For every contrast file matching `contrast_pattern`, detects 3-D peaks,
    thins them by distance and Harvard-Oxford (HO) template membership, then
    writes per-contrast outputs: an indexed ROI .nii mask, voxel and MNI
    coordinate text files, label and combined info tables.

    Returns:
        (indexed_mask_rois_files, coord_rois_files): lists of the written
        mask and coordinate file paths, one entry per contrast with peaks.

    Relies on module-level configuration not visible here: paths
    (nipype_analyses_path, peak_activation_mask_analysis_name,
    spm_contrasts_path, contrast_pattern, info_template_file,
    ROI_mask_prefix), thresholds (threshold, cluster_nbvoxels,
    min_dist_between_ROIs) and libraries (nib, stat_map,
    compute_recombined_HO_template).
    """
    write_dir = os.path.join(nipype_analyses_path, peak_activation_mask_analysis_name)
    print(spm_contrasts_path)
    if not os.path.exists(write_dir):
        os.makedirs(write_dir)
    spm_mask_files = glob.glob(os.path.join(spm_contrasts_path, contrast_pattern))
    # Sort in place, then print the list (the original printed the None
    # returned by list.sort()).
    spm_mask_files.sort()
    print(spm_mask_files)
    # Use the first contrast image as the geometric reference.
    img = nib.load(spm_mask_files[0])
    img_header = img.get_header()  # NOTE(review): deprecated nibabel API, kept for compatibility
    img_affine = img.get_affine()
    img_shape = img.shape
    # Combined HO template resliced to the contrast geometry.
    resliced_full_HO_data, HO_labels, HO_abbrev_labels = compute_recombined_HO_template(img_header, img_affine, img_shape)
    # dtype=str replaces the Python-2-only dtype='string' (TypeError on Python 3).
    np_HO_abbrev_labels = np.array(HO_abbrev_labels, dtype=str)
    np_HO_labels = np.array(HO_labels, dtype=str)
    # Template label values, background (first unique value) dropped.
    template_indexes = np.unique(resliced_full_HO_data)[1:]
    print(np_HO_labels.shape, np_HO_abbrev_labels.shape, template_indexes.shape)
    # Template summary table: index, full label, abbreviated label
    # (assumes len(template_indexes) == len(HO_labels) — TODO confirm).
    info_template = np.hstack((template_indexes.reshape(len(HO_labels), 1),
                               np_HO_labels.reshape(len(HO_labels), 1),
                               np_HO_abbrev_labels.reshape(len(HO_labels), 1)))
    print(info_template)
    np.savetxt(info_template_file, info_template, fmt='%s %s %s')
    indexed_mask_rois_files = []
    coord_rois_files = []
    for i, spm_mask_file in enumerate(spm_mask_files):
        print(spm_mask_file)
        spm_mask_img = nib.load(spm_mask_file)
        # Peak detection (nipy stat_map.get_3d_peaks).
        peaks = stat_map.get_3d_peaks(image=spm_mask_img, mask=None, threshold=threshold, nn=cluster_nbvoxels)
        list_orig_ROI_spm_index = []
        if peaks is not None:
            print(len(peaks))
            list_orig_peak_coords = [peak['ijk'] for peak in peaks]
            list_orig_peak_MNI_coords = [peak['pos'] for peak in peaks]
            # NOTE(review): the original also built a 1-D `merged_mask_data`
            # here and later indexed it with a 3-D boolean mask, which raises
            # IndexError; the value was never used, so that dead (and
            # crashing) code has been removed.  The unused per-peak 'val'
            # list was removed as well.
            list_orig_ROI_spm_index = list_orig_ROI_spm_index + [i + 1] * len(peaks)
            print(len(list_orig_peak_coords))
            print(len(list_orig_ROI_spm_index))
            # Keep peaks that are mutually distant and inside the HO template.
            list_selected_peaks_coords, indexed_mask_rois_data, list_selected_peaks_indexes = remove_close_peaks_neigh_in_binary_template(list_orig_peak_coords, resliced_full_HO_data, min_dist_between_ROIs)
            print(list_selected_peaks_indexes)
            print(len(list_selected_peaks_indexes))
            # HO template value under each selected peak.
            template_indexes = np.array([resliced_full_HO_data[coord[0], coord[1], coord[2]] for coord in list_selected_peaks_coords], dtype='int64')
            print(template_indexes)
            print(template_indexes - 1)
            # Template values are used 1-based here (index 0 = first label).
            label_rois = np_HO_abbrev_labels[template_indexes - 1]
            full_label_rois = np_HO_labels[template_indexes - 1]
            print(label_rois)
            # Per-contrast output file names.
            indexed_mask_rois_file = os.path.join(nipype_analyses_path, peak_activation_mask_analysis_name, "indexed_mask-" + ROI_mask_prefix + "_spm_contrast" + str(i + 1) + ".nii")
            coord_rois_file = os.path.join(nipype_analyses_path, peak_activation_mask_analysis_name, "coords-" + ROI_mask_prefix + "_spm_contrast" + str(i + 1) + ".txt")
            MNI_coord_rois_file = os.path.join(nipype_analyses_path, peak_activation_mask_analysis_name, "coords-MNI-" + ROI_mask_prefix + "_spm_contrast" + str(i + 1) + ".txt")
            label_rois_file = os.path.join(nipype_analyses_path, peak_activation_mask_analysis_name, "labels-" + ROI_mask_prefix + "_spm_contrast" + str(i + 1) + ".txt")
            info_rois_file = os.path.join(nipype_analyses_path, peak_activation_mask_analysis_name, "info-" + ROI_mask_prefix + "_spm_contrast" + str(i + 1) + ".txt")
            # Export ROI image with one index per ROI.
            print(np.unique(indexed_mask_rois_data)[1:].shape)
            nib.save(nib.Nifti1Image(data=indexed_mask_rois_data, header=img_header, affine=img_affine), indexed_mask_rois_file)
            # Voxel (ijk) coordinates of selected peaks.
            np.savetxt(coord_rois_file, np.array(list_selected_peaks_coords, dtype=int), fmt='%d')
            # MNI coordinates of selected peaks.
            list_rois_MNI_coords = [list_orig_peak_MNI_coords[index] for index in list_selected_peaks_indexes]
            print(list_rois_MNI_coords)
            rois_MNI_coords = np.array(list_rois_MNI_coords, dtype=int)
            np.savetxt(MNI_coord_rois_file, rois_MNI_coords, fmt='%d')
            # Original 1-based contrast index of each selected peak.
            list_rois_orig_indexes = [list_orig_ROI_spm_index[index] for index in list_selected_peaks_indexes]
            print(list_rois_orig_indexes)
            rois_orig_indexes = np.array(list_rois_orig_indexes, dtype=int).reshape(len(list_rois_orig_indexes), 1)
            print(rois_orig_indexes.shape)
            # ROI labels.
            np.savetxt(label_rois_file, label_rois, fmt='%s')
            # Combined per-ROI info table for downstream infosource use.
            np_label_rois = np.array(label_rois, dtype=str).reshape(len(label_rois), 1)
            np_full_label_rois = np.array(full_label_rois, dtype=str).reshape(len(full_label_rois), 1)
            print(np_label_rois.shape)
            print(rois_MNI_coords.shape)
            info_rois = np.hstack((np.unique(indexed_mask_rois_data)[1:].reshape(len(label_rois), 1), np_full_label_rois, np_label_rois, rois_MNI_coords, rois_orig_indexes))
            print(info_rois)
            np.savetxt(info_rois_file, info_rois, fmt='%s %s %s %s %s %s %s')
            indexed_mask_rois_files.append(indexed_mask_rois_file)
            coord_rois_files.append(coord_rois_file)
    return indexed_mask_rois_files, coord_rois_files
def compute_labelled_mask_from_HO_and_merged_spm_mask():
    """Pool peaks from ALL contrast images, then build one labelled ROI mask.

    Peaks of every contrast matching `contrast_pattern` are concatenated,
    thinned by distance and Harvard-Oxford (HO) template membership, and a
    single set of outputs is written: a merged contrast mask, an indexed ROI
    mask, a mask carrying the originating contrast index, plus coordinate,
    label and info text files.

    Returns:
        (indexed_mask_rois_file, coord_rois_file): paths of the indexed ROI
        mask and the voxel-coordinate file.

    Relies on module-level configuration not visible here: input paths and
    thresholds (as in compute_labelled_mask_from_HO_all_signif_contrasts)
    and the output paths merged_mask_img_file, indexed_mask_rois_file,
    coord_rois_file, MNI_coord_rois_file, label_rois_file, info_rois_file,
    orig_spm_index_mask_file — presumably defined at module level; verify.
    """
    write_dir = os.path.join(nipype_analyses_path, peak_activation_mask_analysis_name)
    print(spm_contrasts_path)
    if not os.path.exists(write_dir):
        os.makedirs(write_dir)
    spm_mask_files = glob.glob(os.path.join(spm_contrasts_path, contrast_pattern))
    # Sort in place, then print the list (the original printed the None
    # returned by list.sort()).
    spm_mask_files.sort()
    print(spm_mask_files)
    # Use the first contrast image as the geometric reference.
    img = nib.load(spm_mask_files[0])
    img_header = img.get_header()  # NOTE(review): deprecated nibabel API, kept for compatibility
    img_affine = img.get_affine()
    img_shape = img.shape
    # Combined HO template resliced to the contrast geometry.
    resliced_full_HO_data, HO_labels, HO_abbrev_labels = compute_recombined_HO_template(img_header, img_affine, img_shape)
    # dtype=str replaces the Python-2-only dtype='string' (TypeError on Python 3).
    np_HO_abbrev_labels = np.array(HO_abbrev_labels, dtype=str)
    np_HO_labels = np.array(HO_labels, dtype=str)
    template_indexes = np.unique(resliced_full_HO_data)[1:]
    print(np_HO_labels.shape, np_HO_abbrev_labels.shape, template_indexes.shape)
    # Template summary table: index, full label, abbreviated label.
    info_template = np.hstack((template_indexes.reshape(len(HO_labels), 1),
                               np_HO_labels.reshape(len(HO_labels), 1),
                               np_HO_abbrev_labels.reshape(len(HO_labels), 1)))
    print(info_template)
    np.savetxt(info_template_file, info_template, fmt='%s %s %s')
    # Accumulator mask: each contrast adds (i+1) over its supra-threshold voxels.
    merged_mask_data = np.zeros(shape=img_shape, dtype=float)
    print(merged_mask_data.shape)
    # Peak info pooled across all contrasts.
    list_orig_ROI_spm_index = []
    list_orig_peak_coords = []
    list_orig_peak_MNI_coords = []
    for i, spm_mask_file in enumerate(spm_mask_files):
        print(spm_mask_file)
        spm_mask_img = nib.load(spm_mask_file)
        spm_mask_data = spm_mask_img.get_data()
        # Peak detection (nipy stat_map.get_3d_peaks).
        peaks = stat_map.get_3d_peaks(image=spm_mask_img, mask=None, threshold=threshold, nn=cluster_nbvoxels)
        if peaks is not None:
            print(len(peaks))
            # (the unused per-peak 'val' list of the original was dropped)
            list_orig_peak_coords = list_orig_peak_coords + [peak['ijk'] for peak in peaks]
            list_orig_peak_MNI_coords = list_orig_peak_MNI_coords + [peak['pos'] for peak in peaks]
            # NaN > threshold is False, so NaN voxels are left untouched.
            merged_mask_data[spm_mask_data > threshold] += i + 1
            list_orig_ROI_spm_index = list_orig_ROI_spm_index + [i + 1] * len(peaks)
    print(len(list_orig_peak_coords))
    print(len(list_orig_ROI_spm_index))
    # Select pooled peaks by mutual distance and HO template membership.
    list_selected_peaks_coords, indexed_mask_rois_data, list_selected_peaks_indexes = remove_close_peaks_neigh_in_binary_template(list_orig_peak_coords, resliced_full_HO_data, min_dist_between_ROIs)
    nib.save(nib.Nifti1Image(data=merged_mask_data, header=img_header, affine=img_affine), merged_mask_img_file)
    print(list_selected_peaks_indexes)
    print(len(list_selected_peaks_indexes))
    # HO template value under each selected peak (used 1-based below).
    template_indexes = np.array([resliced_full_HO_data[coord[0], coord[1], coord[2]] for coord in list_selected_peaks_coords], dtype='int64')
    print(template_indexes)
    print(template_indexes - 1)
    label_rois = np_HO_abbrev_labels[template_indexes - 1]
    full_label_rois = np_HO_labels[template_indexes - 1]
    print(label_rois)
    # Export ROI image with one index per ROI.
    print(np.unique(indexed_mask_rois_data)[1:].shape)
    nib.save(nib.Nifti1Image(data=indexed_mask_rois_data, header=img_header, affine=img_affine), indexed_mask_rois_file)
    # Voxel (ijk) coordinates of selected peaks.
    np.savetxt(coord_rois_file, np.array(list_selected_peaks_coords, dtype=int), fmt='%d')
    # MNI coordinates of selected peaks.
    list_rois_MNI_coords = [list_orig_peak_MNI_coords[index] for index in list_selected_peaks_indexes]
    print(list_rois_MNI_coords)
    rois_MNI_coords = np.array(list_rois_MNI_coords, dtype=int)
    np.savetxt(MNI_coord_rois_file, rois_MNI_coords, fmt='%d')
    # Original 1-based contrast index of each selected peak.
    list_rois_orig_indexes = [list_orig_ROI_spm_index[index] for index in list_selected_peaks_indexes]
    print(list_rois_orig_indexes)
    rois_orig_indexes = np.array(list_rois_orig_indexes, dtype=int).reshape(len(list_rois_orig_indexes), 1)
    print(rois_orig_indexes.shape)
    # Mask carrying, for each ROI, the index of the contrast its peak came from.
    # (the original reused the loop variable `i` here, shadowing the contrast
    # loop above; renamed for clarity)
    orig_spm_index_mask_data = np.zeros(shape=img_shape, dtype=int)
    print(np.unique(indexed_mask_rois_data))
    # unique()[1:] drops the -1 background, leaving ROI indexes 0..n-1.
    for roi_index in np.unique(indexed_mask_rois_data)[1:]:
        print(roi_index, np.sum(indexed_mask_rois_data == roi_index), rois_orig_indexes[roi_index])
        orig_spm_index_mask_data[indexed_mask_rois_data == roi_index] = rois_orig_indexes[roi_index]
    nib.save(nib.Nifti1Image(data=orig_spm_index_mask_data, header=img_header, affine=img_affine), orig_spm_index_mask_file)
    # ROI labels.
    np.savetxt(label_rois_file, label_rois, fmt='%s')
    # Combined per-ROI info table for downstream infosource use.
    np_label_rois = np.array(label_rois, dtype=str).reshape(len(label_rois), 1)
    np_full_label_rois = np.array(full_label_rois, dtype=str).reshape(len(full_label_rois), 1)
    print(np_label_rois.shape)
    print(rois_MNI_coords.shape)
    info_rois = np.hstack((np.unique(indexed_mask_rois_data)[1:].reshape(len(label_rois), 1), np_full_label_rois, np_label_rois, rois_MNI_coords, rois_orig_indexes))
    print(info_rois)
    np.savetxt(info_rois_file, info_rois, fmt='%s %s %s %s %s %s %s')
    return indexed_mask_rois_file, coord_rois_file
def compute_labelled_mask_from_HO_and_merged_thr_spm_mask():
    """Like compute_labelled_mask_from_HO_and_merged_spm_mask, but reads
    pre-thresholded spmT images for a fixed set of contrast indexes.

    The input files are already thresholded ("spmT_0001_thr.img"), so peak
    detection runs without an explicit threshold and the merge mask keeps any
    non-zero, non-NaN voxel.

    Returns:
        (indexed_mask_rois_file, coord_rois_file): paths of the indexed ROI
        mask and the voxel-coordinate file.

    Relies on module-level configuration not visible here, including the
    output paths merged_mask_img_file, indexed_mask_rois_file,
    coord_rois_file, MNI_coord_rois_file, label_rois_file, info_rois_file,
    rois_orig_indexes_file, orig_spm_index_mask_file — presumably defined at
    module level; verify.
    """
    write_dir = os.path.join(nipype_analyses_path, peak_activation_mask_analysis_name)
    print(spm_contrasts_path)
    if not os.path.exists(write_dir):
        os.makedirs(write_dir)
    # Fixed set of contrasts of interest; file order follows this list.
    spm_contrast_indexes = [3, 4, 5, 8, 9, 10]
    spm_mask_files = [os.path.join(spm_contrasts_path, "_contrast_index_" + str(index) + "_group_contrast_index_0/spmT_0001_thr.img") for index in spm_contrast_indexes]
    print(len(spm_mask_files))
    # Use the first contrast image as the geometric reference.
    img = nib.load(spm_mask_files[0])
    img_header = img.get_header()  # NOTE(review): deprecated nibabel API, kept for compatibility
    img_affine = img.get_affine()
    img_shape = img.shape
    # Combined HO template resliced to the contrast geometry.
    resliced_full_HO_data, HO_labels, HO_abbrev_labels = compute_recombined_HO_template(img_header, img_affine, img_shape)
    # dtype=str replaces the Python-2-only dtype='string' (TypeError on Python 3).
    np_HO_abbrev_labels = np.array(HO_abbrev_labels, dtype=str)
    np_HO_labels = np.array(HO_labels, dtype=str)
    template_indexes = np.unique(resliced_full_HO_data)[1:]
    print(np_HO_labels.shape, np_HO_abbrev_labels.shape, template_indexes.shape)
    # Template summary table: index, full label, abbreviated label.
    info_template = np.hstack((template_indexes.reshape(len(HO_labels), 1),
                               np_HO_labels.reshape(len(HO_labels), 1),
                               np_HO_abbrev_labels.reshape(len(HO_labels), 1)))
    print(info_template)
    np.savetxt(info_template_file, info_template, fmt='%s %s %s')
    # Accumulator mask: each contrast adds (i+1) over its active voxels.
    merged_mask_data = np.zeros(shape=img_shape, dtype=float)
    print(merged_mask_data.shape)
    # Peak info pooled across all contrasts.
    list_orig_ROI_spm_index = []
    list_orig_peak_coords = []
    list_orig_peak_MNI_coords = []
    for i, spm_mask_file in enumerate(spm_mask_files):
        print(spm_mask_file)
        spm_mask_img = nib.load(spm_mask_file)
        spm_mask_data = spm_mask_img.get_data()
        # Images are pre-thresholded, so no threshold/nn arguments here.
        peaks = stat_map.get_3d_peaks(image=spm_mask_img, mask=None)
        if peaks is not None:
            print(len(peaks))
            # (the unused per-peak 'val' list of the original was dropped)
            list_orig_peak_coords = list_orig_peak_coords + [peak['ijk'] for peak in peaks]
            list_orig_peak_MNI_coords = list_orig_peak_MNI_coords + [peak['pos'] for peak in peaks]
            # Active = non-zero and non-NaN (thresholding already applied upstream).
            merged_mask_data[np.logical_and(spm_mask_data != 0.0, np.logical_not(np.isnan(spm_mask_data)))] += i + 1
            list_orig_ROI_spm_index = list_orig_ROI_spm_index + [i + 1] * len(peaks)
    print(len(list_orig_peak_coords))
    print(len(list_orig_ROI_spm_index))
    # Select pooled peaks by mutual distance and HO template membership.
    list_selected_peaks_coords, indexed_mask_rois_data, list_selected_peaks_indexes = remove_close_peaks_neigh_in_binary_template(list_orig_peak_coords, resliced_full_HO_data, min_dist_between_ROIs)
    nib.save(nib.Nifti1Image(data=merged_mask_data, header=img_header, affine=img_affine), merged_mask_img_file)
    print(list_selected_peaks_indexes)
    print(len(list_selected_peaks_indexes))
    # HO template value under each selected peak (used 1-based below).
    template_indexes = np.array([resliced_full_HO_data[coord[0], coord[1], coord[2]] for coord in list_selected_peaks_coords], dtype='int64')
    print(template_indexes)
    print(template_indexes - 1)
    label_rois = np_HO_abbrev_labels[template_indexes - 1]
    full_label_rois = np_HO_labels[template_indexes - 1]
    print(label_rois)
    # Export ROI image with one index per ROI.
    print(np.unique(indexed_mask_rois_data)[1:].shape)
    nib.save(nib.Nifti1Image(data=indexed_mask_rois_data, header=img_header, affine=img_affine), indexed_mask_rois_file)
    # Voxel (ijk) coordinates of selected peaks.
    np.savetxt(coord_rois_file, np.array(list_selected_peaks_coords, dtype=int), fmt='%d')
    # MNI coordinates of selected peaks.
    list_rois_MNI_coords = [list_orig_peak_MNI_coords[index] for index in list_selected_peaks_indexes]
    print(list_rois_MNI_coords)
    rois_MNI_coords = np.array(list_rois_MNI_coords, dtype=int)
    np.savetxt(MNI_coord_rois_file, rois_MNI_coords, fmt='%d')
    # Original 1-based contrast index of each selected peak.
    list_rois_orig_indexes = [list_orig_ROI_spm_index[index] for index in list_selected_peaks_indexes]
    print(list_rois_orig_indexes)
    rois_orig_indexes = np.array(list_rois_orig_indexes, dtype=int).reshape(len(list_rois_orig_indexes), 1)
    print(rois_orig_indexes.shape)
    np.savetxt(rois_orig_indexes_file, rois_orig_indexes, fmt='%d')
    # Mask carrying, for each ROI, the index of the contrast its peak came from.
    # (the original reused the loop variable `i` here, shadowing the contrast
    # loop above; renamed for clarity)
    orig_spm_index_mask_data = np.zeros(shape=img_shape, dtype=int)
    print(np.unique(indexed_mask_rois_data))
    # unique()[1:] drops the -1 background, leaving ROI indexes 0..n-1.
    for roi_index in np.unique(indexed_mask_rois_data)[1:]:
        print(roi_index, np.sum(indexed_mask_rois_data == roi_index), rois_orig_indexes[roi_index])
        orig_spm_index_mask_data[indexed_mask_rois_data == roi_index] = rois_orig_indexes[roi_index]
    nib.save(nib.Nifti1Image(data=orig_spm_index_mask_data, header=img_header, affine=img_affine), orig_spm_index_mask_file)
    # ROI labels.
    np.savetxt(label_rois_file, label_rois, fmt='%s')
    # Combined per-ROI info table for downstream infosource use.
    np_label_rois = np.array(label_rois, dtype=str).reshape(len(label_rois), 1)
    np_full_label_rois = np.array(full_label_rois, dtype=str).reshape(len(full_label_rois), 1)
    print(np_label_rois.shape)
    print(rois_MNI_coords.shape)
    info_rois = np.hstack((np.unique(indexed_mask_rois_data)[1:].reshape(len(label_rois), 1), np_full_label_rois, np_label_rois, rois_MNI_coords, rois_orig_indexes))
    print(info_rois)
    np.savetxt(info_rois_file, info_rois, fmt='%s %s %s %s %s %s %s')
    return indexed_mask_rois_file, coord_rois_file
# Script entry point: run the pipeline on pre-thresholded spmT images.
# Earlier pipeline variants are kept below, commented out, for reference.
if __name__ =='__main__':
    #compute_labelled_mask_from_HO()
    #compute_labelled_mask_from_HO_all_signif_contrasts()
    compute_labelled_mask_from_HO_and_merged_thr_spm_mask()
| 40.20429
| 437
| 0.667048
| 5,434
| 39,360
| 4.371181
| 0.047663
| 0.039742
| 0.0389
| 0.035196
| 0.934198
| 0.916642
| 0.908054
| 0.894792
| 0.880689
| 0.874374
| 0
| 0.008432
| 0.237703
| 39,360
| 978
| 438
| 40.245399
| 0.783229
| 0.197154
| 0
| 0.829146
| 0
| 0
| 0.017179
| 0.001321
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025126
| false
| 0
| 0.035176
| 0
| 0.095477
| 0.20603
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
59b5edea8cc944821e4c8839c24f1c4a6b2e11af
| 100
|
py
|
Python
|
schedule/context_processors.py
|
yourcelf/masterschedule
|
e585df0e9edcaff5fa4f04f77a9452e3073b5db7
|
[
"Unlicense"
] | 1
|
2015-02-11T04:08:36.000Z
|
2015-02-11T04:08:36.000Z
|
schedule/context_processors.py
|
yourcelf/masterschedule
|
e585df0e9edcaff5fa4f04f77a9452e3073b5db7
|
[
"Unlicense"
] | null | null | null |
schedule/context_processors.py
|
yourcelf/masterschedule
|
e585df0e9edcaff5fa4f04f77a9452e3073b5db7
|
[
"Unlicense"
] | null | null | null |
from django.conf import settings
def base_url(request):
return {"BASE_URL": settings.BASE_URL}
| 20
| 42
| 0.76
| 15
| 100
| 4.866667
| 0.666667
| 0.287671
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14
| 100
| 4
| 43
| 25
| 0.848837
| 0
| 0
| 0
| 0
| 0
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
59bb2f7c232ec34715e468717ab1c201b2312f19
| 4,441
|
py
|
Python
|
http_parser/config.py
|
CharlesZhong/Mobile-Celluar-Measure
|
1f7a4ac017ec5a2d03bebfb504df37792bf0eed7
|
[
"MIT"
] | null | null | null |
http_parser/config.py
|
CharlesZhong/Mobile-Celluar-Measure
|
1f7a4ac017ec5a2d03bebfb504df37792bf0eed7
|
[
"MIT"
] | null | null | null |
http_parser/config.py
|
CharlesZhong/Mobile-Celluar-Measure
|
1f7a4ac017ec5a2d03bebfb504df37792bf0eed7
|
[
"MIT"
] | null | null | null |
__author__ = 'Charles'
import os
settings = {
"mac_test": {
"data_dir": "/Users/Charles/Data/NSDI2015",
"ori_input_file": "test_gen_jpeg.txt",
"output_dir": "/Users/Charles/Data/NSDI2015/output/mac_test",
"base_output_file": "ori_output.txt",
"image_output_file": "image_output.txt",
"ori_image_output_file": "ori_image_output.txt",
"filter_image_output_file": "filter_image_output_file.txt",
"jpeg_dir": "/Users/Charles/Data/NSDI2015/jpeg",
"webp_time_output_file": "webp_time_output_file.txt",
"zip_time_output_file" : "zip_time_output_file.txt",
},
"mac_prod":{
"data_dir": "/Users/Charles/Data/NSDI2015",
"ori_input_file": "1211.txt",
"output_dir": "/Users/Charles/Data/NSDI2015/output/mac_prod",
"base_output_file": "ori_output.txt",
"image_output_file": "image_output.txt",
"ori_image_output_file": "ori_image_output.txt",
"filter_image_output_file": "filter_image_output_file.txt",
"jpeg_dir": "/Users/Charles/Data/NSDI2015/jpeg",
"webp_time_output_file": "webp_time_output_file.txt",
"zip_time_output_file" : "zip_time_output_file.txt",
},
"linux_test": {
"data_dir": "/media/sf_baidu_data",
"ori_input_file": "test_ori.txt",
"output_dir": "/media/sf_baidu_data/linux_test",
"base_output_file": "ori_output.txt",
"image_output_file": "image_output.txt",
"ori_image_output_file": "ori_image_output.txt",
"filter_image_output_file": "filter_image_output_file.txt",
"jpeg_dir": "/Users/Charles/Data/NSDI2015/jpeg",
},
"linux_prod":{
"data_dir": "/media/sf_baidu_data",
"ori_input_file": "1211.txt",
"output_dir": "/media/sf_baidu_data/linux_prod",
"base_output_file": "ori_output.txt",
"image_output_file": "image_output.txt",
"ori_image_output_file": "ori_image_output.txt",
"filter_image_output_file": "filter_image_output_file.txt",
},
"thtf_test":{
"data_dir": "/home/charles/Data/NSDI2015",
"ori_input_file": "ori_jpeg_sample.txt",
"output_dir": "/home/charles/Data/NSDI2015/result/test",
"base_output_file": "ori_output.txt",
"image_output_file": "image_output.txt",
"ori_image_output_file": "ori_image_output.txt",
"filter_image_output_file": "ori_jpeg.txt",
"jpeg_dir": "/home/charles/Data/NSDI2015/jpeg_test",
"webp_time_output_file": "webp_time_output_file.txt",
"zip_time_output_file" : "zip_time_output_file.txt",
},
"thtf_prod":{
"data_dir": "/home/charles/Data/NSDI2015",
"ori_input_file": "ori_jpeg.txt",
"output_dir": "/home/charles/Data/NSDI2015/result/prod",
"base_output_file": "ori_output.txt",
"image_output_file": "image_output.txt",
"ori_image_output_file": "ori_image_output.txt",
"filter_image_output_file": "ori_jpeg.txt",
"jpeg_dir": "/home/charles/Data/NSDI2015/jpeg",
"webp_time_output_file": "webp_time_output_file.txt",
"zip_time_output_file" : "zip_time_output_file.txt",
},
"s3_test":{
"data_dir": "/home/zhongxin/workspace/nsdi_2015/data",
"ori_input_file": "test_ori.txt",
"output_dir": "/home/zhongxin/workspace/nsdi_2015/data/result/test",
"base_output_file": "ori_output.txt",
"image_output_file": "image_output.txt",
"ori_image_output_file": "ori_image_output.txt",
"filter_image_output_file": "filter_image_output_file.txt",
"jpeg_dir": "/home/zhongxin/workspace/nsdi_2015/data/jpeg",
"webp_time_output_file": "webp_time_output_file.txt",
"zip_time_output_file" : "zip_time_output_file.txt",
},
"s3_prod":{
"data_dir": "/home/zhongxin/workspace/nsdi_2015/data",
"ori_input_file": "test_ori.txt",
"output_dir": "/home/zhongxin/workspace/nsdi_2015/data/result/prod",
"base_output_file": "ori_output.txt",
"image_output_file": "image_output.txt",
"ori_image_output_file": "ori_image_output.txt",
"filter_image_output_file": "filter_image_output_file.txt",
"jpeg_dir": "/home/zhongxin/workspace/nsdi_2015/data/jpeg",
"webp_time_output_file": "webp_time_output_file.txt",
"zip_time_output_file" : "zip_time_output_file.txt",
},
}
| 43.116505
| 76
| 0.65751
| 581
| 4,441
| 4.540448
| 0.067126
| 0.235027
| 0.170584
| 0.111448
| 0.959818
| 0.959818
| 0.959818
| 0.956785
| 0.90978
| 0.809325
| 0
| 0.023975
| 0.192299
| 4,441
| 103
| 77
| 43.116505
| 0.711458
| 0
| 0
| 0.652632
| 0
| 0
| 0.688204
| 0.387663
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010526
| 0
| 0.010526
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
59d4deb0ca8ba255304c19372704e4d1d7081bd6
| 166
|
py
|
Python
|
doc/python_study_code/func2.py
|
beiliwenxiao/vimrc
|
eb38fc769f3f5f78000060dac674b5c49d63c24c
|
[
"MIT"
] | null | null | null |
doc/python_study_code/func2.py
|
beiliwenxiao/vimrc
|
eb38fc769f3f5f78000060dac674b5c49d63c24c
|
[
"MIT"
] | null | null | null |
doc/python_study_code/func2.py
|
beiliwenxiao/vimrc
|
eb38fc769f3f5f78000060dac674b5c49d63c24c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
def concat(*args, sep="/"):
    """Join the positional string arguments with *sep*.

    :param args: the strings to join; with no arguments the result is "".
    :param sep: separator placed between consecutive arguments (default "/").
    :returns: the joined string.
    :raises TypeError: if any argument is not a string.
    """
    return sep.join(args)
# Demonstration calls; the return values are discarded.
concat("earth","mars","venus")
concat("earth","mars","venus", sep=".")
| 18.444444
| 39
| 0.608434
| 24
| 166
| 4.208333
| 0.666667
| 0.217822
| 0.29703
| 0.39604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006849
| 0.120482
| 166
| 8
| 40
| 20.75
| 0.684932
| 0.198795
| 0
| 0
| 0
| 0
| 0.229008
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0.25
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 8
|
ab9a9089566344b28ed3d944a1d0b38197b4a4d2
| 783
|
py
|
Python
|
tests/parser/pasi-brew-eite-99-example-penguin-conclusions.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/pasi-brew-eite-99-example-penguin-conclusions.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/pasi-brew-eite-99-example-penguin-conclusions.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser-test fixture: an ASP/DLV-style encoding of rules (head, positive
# body `pbl`, negative body `nbl`), opposite literals `opp`, and a rule
# priority `pr` — presumably the Brewka/Eiter '99 penguin example, judging
# from the test file name (TODO confirm against the test harness).
# NOTE: the name shadows the builtin `input`; kept because the harness
# likely reads this exact module-level name.
input = """
% Input: specify parts of the rules ...
rule(r1). head(bird,r1).
rule(r2). head(swims,r1).
rule(r3). head(neg_flies,r3). pbl(peng,r3). nbl(flies,r3).
rule(r4). head(flies,r4). pbl(bird,r4). nbl(neg_flies,r4).
rule(r5). head(peng,r5). pbl(bird,r5). pbl(swims,r5). nbl(neg_peng,r5).
opp(flies,neg_flies).
opp(peng,neg_peng).
pr(r3,r4).
"""
# Expected parser output for this test case; textually it matches the
# `input` fixture, i.e. the parser is expected to echo the program
# unchanged — TODO confirm how the harness compares the two.
output = """
% Input: specify parts of the rules ...
rule(r1). head(bird,r1).
rule(r2). head(swims,r1).
rule(r3). head(neg_flies,r3). pbl(peng,r3). nbl(flies,r3).
rule(r4). head(flies,r4). pbl(bird,r4). nbl(neg_flies,r4).
rule(r5). head(peng,r5). pbl(bird,r5). pbl(swims,r5). nbl(neg_peng,r5).
opp(flies,neg_flies).
opp(peng,neg_peng).
pr(r3,r4).
"""
| 18.209302
| 73
| 0.591315
| 134
| 783
| 3.380597
| 0.171642
| 0.10596
| 0.075055
| 0.083885
| 0.975717
| 0.975717
| 0.975717
| 0.975717
| 0.975717
| 0.975717
| 0
| 0.059655
| 0.186462
| 783
| 42
| 74
| 18.642857
| 0.651491
| 0
| 0
| 0.909091
| 0
| 0.272727
| 0.958389
| 0.056376
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
abf1e76bfbdc7039a5612259f14bc2f5e13da183
| 136
|
py
|
Python
|
tests/test_basic_addition.py
|
charlestudor/PokerNowLogConverter
|
f888a26d805546b2e470c1066552da4dfe9caf58
|
[
"MIT"
] | 1
|
2022-01-18T18:14:41.000Z
|
2022-01-18T18:14:41.000Z
|
tests/test_basic_addition.py
|
charlestudor/PokerNowLogConverter
|
f888a26d805546b2e470c1066552da4dfe9caf58
|
[
"MIT"
] | 4
|
2022-01-19T10:48:49.000Z
|
2022-01-26T21:55:08.000Z
|
tests/test_basic_addition.py
|
charlestudor/PokerNowLogConverter
|
f888a26d805546b2e470c1066552da4dfe9caf58
|
[
"MIT"
] | null | null | null |
"""Test Basic Addition"""
def test_basic_addition():
    """Sanity check: integer addition produces the expected sum."""
    total = 1 + 1
    assert total == 2
def test_basic_addition_inverse():
    """Sanity check: the sum of 1 and 1 is not 3."""
    total = 1 + 1
    assert total != 3
| 13.6
| 34
| 0.639706
| 20
| 136
| 4.1
| 0.45
| 0.329268
| 0.621951
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.227941
| 136
| 9
| 35
| 15.111111
| 0.72381
| 0.139706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
abfa5cc216cbb2d90e18d9547cfe0a82454b3995
| 153,449
|
py
|
Python
|
python/webgme_bindings/webgme_bindings/core.py
|
Tasse00/bindings
|
2666d71631c47750babec046214895c6e81bb3b8
|
[
"MIT"
] | null | null | null |
python/webgme_bindings/webgme_bindings/core.py
|
Tasse00/bindings
|
2666d71631c47750babec046214895c6e81bb3b8
|
[
"MIT"
] | 15
|
2018-10-30T19:02:54.000Z
|
2021-04-01T10:52:29.000Z
|
python/webgme_bindings/webgme_bindings/core.py
|
Tasse00/bindings
|
2666d71631c47750babec046214895c6e81bb3b8
|
[
"MIT"
] | 4
|
2019-09-27T20:21:50.000Z
|
2021-04-21T00:49:26.000Z
|
"""
For more details regarding inputs and outputs in the form of complex dictionaries, see the original source docs at:
%host%/docs/source/Core.html
for example:
`https://editor.webgme.org/docs/source/Core.html <https://editor.webgme.org/docs/source/Core.html>`_
"""
class Core(object):
"""
Class for querying and manipulating the tree graph in a gme project. Practically, each method takes at least one
    node-dict as input. Use core.load_root(root_hash) to get an initial root-node of the tree.
"""
def __init__(self, webgme):
self._webgme = webgme
self._CONSTANTS = None
def _send(self, payload):
payload['type'] = 'core'
self._webgme.send_request(payload)
return self._webgme.handle_response()
@property
def CONSTANTS(self):
"""
A dictionary with the `constants associated with the Core <https://github.com/webgme/webgme-engine/blob/master/src/common/core/constants.js>`_.
"""
if self._CONSTANTS is None:
self._CONSTANTS = self._send({'name': 'CONSTANTS', 'args': []})
return self._CONSTANTS
def add_library(self, node, name, library_root_hash, library_info=None):
"""
It adds a project as library to your project by copying it over. The library will be a node\
with the given name directly under your project's ROOT. It becomes a read-only portion of your project.\
You will only be able to manipulate it with library functions, but cannot edit the individual nodes inside.\
However you will be able to instantiate or copy the nodes into other places of your project. Every node\
that was part of the META in the originating project becomes part of your project's meta.
:param node: any regular node in your project.
:type node: dict
:param name: the name of the library you wish to use as a namespace in your project.
:type name: str
:param library_root_hash: the hash of your library's root\
(must exist in the project's collection at the time of call).
:type library_root_hash: str
:param library_info: information about your project.
:type library_info: dict
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises JSError: the result of the execution.
:raises CoreIllegalOperationError: the result of the execution.
:raises CoreInternalError: the result of the execution.
"""
return self._send({'name': 'addLibrary', 'args': [node, name, library_root_hash, library_info]})
def add_member(self, node, name, member):
"""
Adds a member to the given set.
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:param member: the new member of the set.
:type member: dict
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'addMember', 'args': [node, name, member]})
def add_mixin(self, node, path):
"""
Adds a mixin to the mixin set of the node.
:param node: the node in question.
:type node: dict
:param path: the path of the mixin node.
:type path: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'addMixin', 'args': [node, path]})
def apply_resolution(self, conflict):
"""
When our attempt to merge two patches ended in some conflict, then we can modify that result highlighting\
that in case of every conflict, which side we prefer (mine vs. theirs). If we give that object as an input\
to this function, it will finish the merge resolving the conflict according our settings and present a final\
patch.
:param conflict: the object that represents our settings for every conflict and the so-far-merged\
patch.
:type conflict: dict
:returns: The function results in a tree structured patch object that contains the changesthat cover\
both parties modifications (and the conflicts are resolved according the input settings).
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'applyResolution', 'args': [conflict]})
def apply_tree_diff(self, node, patch):
"""
Apply changes to the current project.
:param node: the root of the containment hierarchy where we wish to apply the changes
:type node: dict
:param patch: the tree structured collection of changes represented with a special JSON object
:type patch: dict
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises JSError: the result of the execution.
:raises CoreInternalError: the result of the execution.
"""
return self._send({'name': 'applyTreeDiff', 'args': [node, patch]})
def can_set_as_mixin(self, node, path):
"""
Checks if the given path can be added as a mixin to the given node.
:param node: the node in question.
:type node: dict
:param path: the path of the mixin node.
:type path: str
:returns: Returns an object with isOk set to true if the given path can be added as a\
mixin to the given node. If it cannot, the reason will be reported under reason.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'canSetAsMixin', 'args': [node, path]})
def clear_meta_rules(self, node):
"""
Removes all META rules defined at the node. Note that it does not clear any rules from other meta-nodes\
where the node if referenced.
:param node: the node in question.
:type node: dict
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'clearMetaRules', 'args': [node]})
def clear_mixins(self, node):
"""
Removes all mixins for a given node.
:param node: the node in question.
:type node: dict
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'clearMixins', 'args': [node]})
def copy_node(self, node, parent):
"""
Copies the given node into parent.
:param node: the node to be copied.
:type node: dict
:param parent: the parent node of the copy.
:type parent: dict
:returns: The function returns the copied node.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'copyNode', 'args': [node, parent]})
def copy_nodes(self, nodes, parent):
"""
Copies the given nodes into parent.
:param nodes: the nodes to be copied.
:type nodes: list of dict
:param parent: the parent node of the copy.
:type parent: dict
:returns: The function returns an array of the copied nodes. The order follows\
the order of originals.
:rtype: list of dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'copyNodes', 'args': [nodes, parent]})
def create_child(self, node, base):
"""
Creates a child, with base as provided, inside the provided node.
:param node: the parent of the node to be created.
:type node: dict
:param base: the base of the node to be created.
:type base: dict
:returns: The function returns the created child node.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'createChild', 'args': [node, base]})
def create_node(self, parameters=None):
"""
Creates a node according to the given parameters.
:param parameters: the details of the creation.
:type parameters: dict
:returns: The function returns the created node.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'createNode', 'args': [parameters]})
def create_set(self, node, name):
"""
Creates a set for the node.
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'createSet', 'args': [node, name]})
def del_aspect_meta(self, node, name):
"""
Removes the given aspect rule of the node.
:param node: the node in question.
:type node: dict
:param name: the name of the aspect.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delAspectMeta', 'args': [node, name]})
def del_aspect_meta_target(self, node, name, path):
"""
Removes a valid type from the given aspect of the node.
:param node: the node in question.
:type node: dict
:param name: the name of the aspect.
:type name: str
:param path: the absolute path of the valid type of the aspect.
:type path: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delAspectMetaTarget', 'args': [node, name, path]})
def del_attribute(self, node, name):
"""
Removes the given attributes from the given node.
:param node: the node in question.
:type node: dict
:param name: the name of the attribute.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delAttribute', 'args': [node, name]})
def del_attribute_meta(self, node, name):
"""
Removes an attribute definition from the META rules of the node.
:param node: the node in question.
:type node: dict
:param name: the name of the attribute.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delAttributeMeta', 'args': [node, name]})
def del_child_meta(self, node, path):
"""
Removes the given child rule from the node.
:param node: the node in question.
:type node: dict
:param path: the absolute path of the child which rule is to be removed from the node.
:type path: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delChildMeta', 'args': [node, path]})
def del_constraint(self, node, name):
"""
Removes a constraint from the node.
:param node: the node in question.
:type node: dict
:param name: the name of the constraint.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delConstraint', 'args': [node, name]})
def del_member(self, node, name, path):
"""
Removes a member from the set. The functions doesn't remove the node itself.
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:param path: the absolute path of the member to be removed.
:type path: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delMember', 'args': [node, name, path]})
def del_member_attribute(self, node, set_name, member_path, attr_name):
"""
Removes an attribute which represented a property of the given set membership.
:param node: the owner of the set.
:type node: dict
:param set_name: the name of the set.
:type set_name: str
:param member_path: the absolute path of the member node.
:type member_path: str
:param attr_name: the name of the attribute.
:type attr_name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delMemberAttribute', 'args': [node, set_name, member_path, attr_name]})
def del_member_registry(self, node, set_name, path, reg_name):
"""
Removes a registry entry which represented a property of the given set membership.
:param node: the owner of the set.
:type node: dict
:param set_name: the name of the set.
:type set_name: str
:param path: the absolute path of the member node.
:type path: str
:param reg_name: the name of the registry entry.
:type reg_name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delMemberRegistry', 'args': [node, set_name, path, reg_name]})
def del_mixin(self, node, path):
"""
Removes a mixin from the mixin set of the node.
:param node: the node in question.
:type node: dict
:param path: the path of the mixin node.
:type path: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delMixin', 'args': [node, path]})
def del_pointer(self, node, name):
"""
Removes the pointer from the node. (Aliased deletePointer.)
:param node: the node in question.
:type node: dict
:param name: the name of the pointer in question.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delPointer', 'args': [node, name]})
def del_pointer_meta(self, node, name):
"""
Removes the complete META rule regarding the given pointer/set of the node.
:param node: the node in question.
:type node: dict
:param name: the name of the pointer/set.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delPointerMeta', 'args': [node, name]})
def del_pointer_meta_target(self, node, name, path):
"""
Removes a possible target type from the pointer/set of the node.
:param node: the node in question.
:type node: dict
:param name: the name of the pointer/set
:type name: str
:param path: the absolute path of the possible target type.
:type path: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If node is read-only, or definition does not exist.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delPointerMetaTarget', 'args': [node, name, path]})
def del_registry(self, node, name):
"""
Removes the given registry entry from the given node.
:param node: the node in question.
:type node: dict
:param name: the name of the registry entry.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delRegistry', 'args': [node, name]})
def del_set(self, node, name):
"""
Removes a set from the node. (Aliased deleteSet.)
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delSet', 'args': [node, name]})
def del_set_attribute(self, node, set_name, attr_name):
"""
Removes the attribute entry for the set at the node.
:param node: the owner of the set.
:type node: dict
:param set_name: the name of the set.
:type set_name: str
:param attr_name: the name of the attribute entry.
:type attr_name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delSetAttribute', 'args': [node, set_name, attr_name]})
def del_set_registry(self, node, set_name, reg_name):
"""
Removes the registry entry for the set at the node.
:param node: the owner of the set.
:type node: dict
:param set_name: the name of the set.
:type set_name: str
:param reg_name: the name of the registry entry.
:type reg_name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'delSetRegistry', 'args': [node, set_name, reg_name]})
def delete_node(self, node):
"""
Removes a node from the containment hierarchy.
:param node: the node to be removed.
:type node: dict
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'deleteNode', 'args': [node]})
def delete_pointer(self, node, name):
"""
Removes the pointer from the node. (Aliased delPointer.)
:param node: the node in question.
:type node: dict
:param name: the name of the pointer in question.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'deletePointer', 'args': [node, name]})
def delete_set(self, node, name):
"""
Removes a set from the node. (Aliased delSet.)
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:returns: Nothing is returned by the function.
:rtype: None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'deleteSet', 'args': [node, name]})
def generate_tree_diff(self, source_root, target_root):
"""
Generates a differential tree among the two states of the project that contains the necessary changes\
that can modify the source to be identical to the target. The result is in form of a json object.
:param source_root: the root node of the source state.
:type source_root: dict
:param target_root: the root node of the target state.
:type target_root: dict
:returns: the difference between the two containment hierarchies in\
a special JSON object
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises JSError: the status of the exectuion.
:raises CoreInternalError: the status of the exectuion.
"""
return self._send({'name': 'generateTreeDiff', 'args': [source_root, target_root]})
def get_all_meta_nodes(self, node):
"""
Returns all META nodes.
:param node: any node of the containment hierarchy.
:type node: dict
:returns: The function returns a dictionary. The keys of the dictionary\
are the absolute paths of the META nodes of the project. Every value of the dictionary\
is a {@link module:Core~Node}.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getAllMetaNodes', 'args': [node]})
def get_aspect_definition_info(self, node, name, member):
"""
Returns the meta nodes that introduce the given aspect relationship.
:param node: the node in question.
:type node: dict
:param name: the name of the set in question.
:type name: str
:param member: the child.
:type member: dict
:returns: The owner and the target of the aspect meta-rule that makes member a\
valid member of the named aspect of node.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getAspectDefinitionInfo', 'args': [node, name, member]})
def get_aspect_definition_owner(self, node, name):
"""
Returns the meta node that introduces the given aspect.
:param node: the node in question.
:type node: dict
:param name: the name of the set in question.
:type name: str
:returns: The meta-node that defines the aspect and makes a valid aspect for the given node.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getAspectDefinitionOwner', 'args': [node, name]})
def get_aspect_meta(self, node, name):
"""
Returns the list of valid children types of the given aspect.
:param node: the node in question.
:type node: dict
:param name: the name of the aspect.
:type name: str
:returns: The function returns a list of absolute paths of nodes that are valid children of the node\
and fits to the META rules defined for the aspect. Any children, visible under the given aspect of the node\
must be an instance of at least one node represented by the absolute paths.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getAspectMeta', 'args': [node, name]})
def get_attribute(self, node, name):
"""
Retrieves the value of the given attribute of the given node.
:param node: the node in question.
:type node: dict
:param name: the name of the attribute.
:type name: str
:returns: The function returns the value of the attribute of the node.\
If the value is undefined that means the node do not have\
such attribute defined.
:rtype: str or int or float or bool or dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getAttribute', 'args': [node, name]})
def get_attribute_definition_owner(self, node, name):
"""
Returns the meta node that introduces the given attribute.
:param node: the node in question.
:type node: dict
:param name: the name of the attribute in question.
:type name: str
:returns: The meta-node that defines the attribute and makes it valid attribute for the\
given node.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getAttributeDefinitionOwner', 'args': [node, name]})
def get_attribute_meta(self, node, name):
"""
Returns the definition object of an attribute from the META rules of the node.
:param node: the node in question.
:type node: dict
:param name: the name of the attribute.
:type name: str
:returns: The function returns the definition object, where type is always defined.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getAttributeMeta', 'args': [node, name]})
def get_attribute_names(self, node):
    """
    List the names of all attributes defined on the node.

    :param node: the node in question.
    :type node: dict
    :returns: An array with the names of the node's attributes.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getAttributeNames', 'args': [node]}
    return self._send(request)
def get_base(self, node):
    """
    Fetch the direct base of the node.

    :param node: the node in question.
    :type node: dict
    :returns: The base of the given node, or None when there is no such node.
    :rtype: dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getBase', 'args': [node]}
    return self._send(request)
def get_base_root(self, node):
    """
    Fetch the topmost node of the node's inheritance chain.

    :param node: the node in question.
    :type node: dict
    :returns: The root of the inheritance chain (usually the FCO).
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getBaseRoot', 'args': [node]}
    return self._send(request)
def get_base_type(self, node):
    """
    Find the meta-node of the node: the first base (including the node itself)
    that is part of the meta. (Aliased getMetaType.)

    :param node: the node in question
    :type node: dict
    :returns: The first node along the inheritance chain that is a META node,
        or None when no such node exists (ideally only the ROOT yields this).
    :rtype: dict or None
    :raises CoreIllegalArgumentError: If node is not a Node
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getBaseType', 'args': [node]}
    return self._send(request)
def get_base_types(self, node):
    """
    Find the closest META node of the node together with that node's direct mixins.

    :param node: the node in question
    :type node: dict
    :returns: The closest Meta node that is a base of the given node, plus all
        mixin nodes associated with that base.
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getBaseTypes', 'args': [node]}
    return self._send(request)
def get_child_definition_info(self, node, child):
    """
    Find the meta nodes introducing the containment relationship between node and child.

    :param node: the node in question.
    :type node: dict
    :param child: the child.
    :type child: dict
    :returns: The owner and the target of the containment meta-rule that makes
        child a valid child of node.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getChildDefinitionInfo', 'args': [node, child]}
    return self._send(request)
def get_children_hashes(self, node):
    """
    Collect the data hash values of the node's children.

    :param node: the node in question.
    :type node: dict
    :returns: A dictionary mapping each child's relative id to its
        {@link module:Core~ObjectHash}.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getChildrenHashes', 'args': [node]}
    return self._send(request)
def get_children_meta(self, node):
    """
    Produce a JSON representation of the META rules describing the node's valid children.

    :param node: the node in question.
    :type node: dict
    :returns: A detailed JSON structure representing the META rules about the
        possible children of the node.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getChildrenMeta', 'args': [node]}
    return self._send(request)
def get_children_paths(self, node):
    """
    Collect the absolute paths of all children of the given node.

    :param node: the container node in question.
    :type node: dict
    :returns: An array of the children's absolute paths.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getChildrenPaths', 'args': [node]}
    return self._send(request)
def get_children_relids(self, node):
    """
    Collect the relative ids of all children of the given node.

    :param node: the container node in question.
    :type node: dict
    :returns: An array of the relative ids.
    :rtype: list of str
    """
    request = {'name': 'getChildrenRelids', 'args': [node]}
    return self._send(request)
def get_closure_information(self, nodes):
    """
    Gather the information needed to export the given set of nodes into other
    - compatible - projects.

    :param nodes: the set of nodes that we want to export
    :type nodes: list of dict
    :returns: When the closure can be exported, a special JSON object holding
        the data that has to be exported plus the relations that must be recreated
        in the destination project to preserve the structure of the nodes.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getClosureInformation', 'args': [nodes]}
    return self._send(request)
def get_collection_names(self, node):
    """
    List the defined pointer names that have the node as target.

    :param node: the node in question.
    :type node: dict
    :returns: An array with the names of the pointers pointing to the node.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getCollectionNames', 'args': [node]}
    return self._send(request)
def get_collection_paths(self, node, name):
    """
    List the absolute paths of the nodes whose given pointer targets this node.

    :param node: the node in question.
    :type node: dict
    :param name: the pointer name.
    :type name: str
    :returns: An array of absolute paths of nodes that have the named pointer
        pointing to the node.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getCollectionPaths', 'args': [node, name]}
    return self._send(request)
def get_common_base(self, nodes):
    """
    Determine the common base node of all supplied nodes.

    :param nodes: a variable number of nodes to compare
    :type nodes: list of dict
    :returns: The common base, or None when e.g. the root node was passed.
    :rtype: dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getCommonBase', 'args': [nodes]}
    return self._send(request)
def get_common_parent(self, nodes):
    """
    Determine the common parent node of all supplied nodes.

    :param nodes: a variable number of nodes to compare
    :type nodes: list of dict
    :returns: The common parent, or None when no nodes were passed.
    :rtype: dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getCommonParent', 'args': [nodes]}
    return self._send(request)
def get_constraint(self, node, name):
    """
    Fetch a named constraint object of the node.

    :param node: the node in question.
    :type node: dict
    :param name: the constraint name.
    :type name: str
    :returns: The defined constraint, or None when it was not defined for the node.
    :rtype: dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getConstraint', 'args': [node, name]}
    return self._send(request)
def get_constraint_names(self, node):
    """
    List the constraint names defined for the node.

    :param node: the node in question.
    :type node: dict
    :returns: An array of the names of constraints available for the node.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getConstraintNames', 'args': [node]}
    return self._send(request)
def get_fco(self, node):
    """
    Fetch the root of the inheritance chain of the project's Meta nodes.

    :param node: any node in your project.
    :type node: dict
    :returns: The acting FCO of your project.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getFCO', 'args': [node]}
    return self._send(request)
def get_fully_qualified_name(self, node):
    """
    Build the fully qualified name of the node: its namespaces separated by
    dots, followed by the node's name.

    :param node: the node in question.
    :type node: dict
    :returns: The fully qualified name, i.e. the namespaces and name joined by dots.
    :rtype: str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getFullyQualifiedName', 'args': [node]}
    return self._send(request)
def get_guid(self, node):
    """
    Fetch the GUID of a node.

    :param node: the node in question.
    :type node: dict
    :returns: The globally unique identifier.
    :rtype: str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getGuid', 'args': [node]}
    return self._send(request)
def get_hash(self, node):
    """
    Fetch the calculated hash (database id) of the node's data.

    :param node: the node in question.
    :type node: dict
    :returns: The hash value of the node's data; an empty string when the node
        was mutated but not persisted.
    :rtype: str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getHash', 'args': [node]}
    return self._send(request)
def get_instance_paths(self, node):
    """
    Collect the absolute paths of all instances of the given node.

    :param node: the node in question.
    :type node: dict
    :returns: An array of the instances' absolute paths.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getInstancePaths', 'args': [node]}
    return self._send(request)
def get_json_meta(self, node):
    """
    Produce a JSON representation of all META rules of the node.

    :param node: the node in question.
    :type node: dict
    :returns: An object representing all the META rules of the node.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getJsonMeta', 'args': [node]}
    return self._send(request)
def get_library_guid(self, node, name=None):
    """
    Fetch the origin GUID of a library node. (Without a name the returned GUID
    is the same across all projects containing the library node - regardless
    of library hierarchy.)

    :param node: the node in question.
    :type node: dict
    :param name: name of the library to compute the GUID from; when omitted,
        the GUID is computed from the direct library root of the node.
    :type name: None or str
    :returns: The origin GUID of the node.
    :rtype: str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getLibraryGuid', 'args': [node, name]}
    return self._send(request)
def get_library_info(self, node, name):
    """
    Fetch the info object associated with a library.

    :param node: any node in the project.
    :type node: dict
    :param name: the library name.
    :type name: str
    :returns: The information object stored alongside the library (basically
        carrying metaData about the library).
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getLibraryInfo', 'args': [node, name]}
    return self._send(request)
def get_library_meta_nodes(self, node, name, only_own=None):
    """
    Collect all Meta nodes within the given library. By default nodes defined
    in any library nested inside the given library are included as well.

    :param node: any node of your project.
    :type node: dict
    :param name: name of your library.
    :type name: str
    :param only_own: if true, only Meta nodes defined in the library itself are returned.
    :type only_own: bool
    :returns: An array of core nodes from the given library that are part of your meta.
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getLibraryMetaNodes', 'args': [node, name, only_own]}
    return self._send(request)
def get_library_names(self, node):
    """
    List the libraries of the project.

    :param node: any node in your project.
    :type node: dict
    :returns: The fully qualified names of all the libraries in your project
        (even embedded ones).
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getLibraryNames', 'args': [node]}
    return self._send(request)
def get_library_root(self, node, name):
    """
    Fetch the root node of the named library.

    :param node: any node in the project.
    :type node: dict
    :param name: the library name.
    :type name: str
    :returns: The library root node, or None when the library is unknown.
    :rtype: dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getLibraryRoot', 'args': [node, name]}
    return self._send(request)
def get_member_attribute(self, node, set_name, path, attr_name):
    """
    Fetch the value of an attribute attached to a set membership.

    :param node: the owner of the set.
    :type node: dict
    :param set_name: the name of the set.
    :type set_name: str
    :param path: the absolute path of the member node.
    :type path: str
    :param attr_name: the name of the attribute.
    :type attr_name: str
    :returns: The attribute value; undefined means no such attribute is
        connected to the given set membership.
    :rtype: str or int or float or bool or dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberAttribute', 'args': [node, set_name, path, attr_name]}
    return self._send(request)
def get_member_attribute_names(self, node, name, path):
    """
    List the attribute names defined on the set membership of the member node.

    :param node: the owner of the set.
    :type node: dict
    :param name: the name of the set.
    :type name: str
    :param path: the absolute path of the member.
    :type path: str
    :returns: An array of names of attributes that represent some property of the membership.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberAttributeNames', 'args': [node, name, path]}
    return self._send(request)
def get_member_own_attribute(self, node, set_name, path, attr_name):
    """
    Fetch the value of an attribute set specifically on this node's membership
    of the member node (not inherited).

    :param node: the owner of the set.
    :type node: dict
    :param set_name: the name of the set.
    :type set_name: str
    :param path: the absolute path of the member node.
    :type path: str
    :param attr_name: the name of the attribute.
    :type attr_name: str
    :returns: The attribute value; undefined means no such attribute is
        connected to the given set membership.
    :rtype: str or int or float or bool or dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberOwnAttribute', 'args': [node, set_name, path, attr_name]}
    return self._send(request)
def get_member_own_attribute_names(self, node, name, path):
    """
    List the attribute names defined specifically on this node's set membership
    of the member node (not inherited).

    :param node: the owner of the set.
    :type node: dict
    :param name: the name of the set.
    :type name: str
    :param path: the absolute path of the member.
    :type path: str
    :returns: An array of names of attributes that represent some property of the membership.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberOwnAttributeNames', 'args': [node, name, path]}
    return self._send(request)
def get_member_own_registry(self, node, set_name, path, reg_name):
    """
    Fetch the value of a registry entry set specifically on this node's
    membership of the member node (not inherited).

    :param node: the owner of the set.
    :type node: dict
    :param set_name: the name of the set.
    :type set_name: str
    :param path: the absolute path of the member node.
    :type path: str
    :param reg_name: the name of the registry entry.
    :type reg_name: str
    :returns: The registry value; undefined means no such registry entry is
        connected to the given set membership.
    :rtype: str or int or float or bool or dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberOwnRegistry', 'args': [node, set_name, path, reg_name]}
    return self._send(request)
def get_member_own_registry_names(self, node, name, path):
    """
    List the registry entry names defined specifically on this node's set
    membership of the member node (not inherited).

    :param node: the owner of the set.
    :type node: dict
    :param name: the name of the set.
    :type name: str
    :param path: the absolute path of the member.
    :type path: str
    :returns: An array of names of registry entries that represent some
        property of the membership.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberOwnRegistryNames', 'args': [node, name, path]}
    return self._send(request)
def get_member_paths(self, node, name):
    """
    List the absolute paths of the members of the named set of the node.

    :param node: the set owner.
    :type node: dict
    :param name: the name of the set.
    :type name: str
    :returns: An array of absolute path strings of the member nodes of the set.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberPaths', 'args': [node, name]}
    return self._send(request)
def get_member_registry(self, node, set_name, path, reg_name):
    """
    Fetch the value of a registry entry attached to a set membership.

    :param node: the owner of the set.
    :type node: dict
    :param set_name: the name of the set.
    :type set_name: str
    :param path: the absolute path of the member node.
    :type path: str
    :param reg_name: the name of the registry entry.
    :type reg_name: str
    :returns: The registry value; undefined means no such registry entry is
        connected to the given set membership.
    :rtype: str or int or float or bool or dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberRegistry', 'args': [node, set_name, path, reg_name]}
    return self._send(request)
def get_member_registry_names(self, node, name, path):
    """
    List the registry entry names defined on the set membership of the member node.

    :param node: the owner of the set.
    :type node: dict
    :param name: the name of the set.
    :type name: str
    :param path: the absolute path of the member.
    :type path: str
    :returns: An array of names of registry entries that represent some
        property of the membership.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMemberRegistryNames', 'args': [node, name, path]}
    return self._send(request)
def get_meta_type(self, node):
    """
    Find the meta-node of the node: the first base (including the node itself)
    that is part of the meta. (Aliased getBaseType.)

    :param node: the node in question
    :type node: dict
    :returns: The first node along the inheritance chain that is a META node,
        or None when no such node exists (ideally only the ROOT yields this).
    :rtype: dict or None
    :raises CoreIllegalArgumentError: If node is not a Node
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMetaType', 'args': [node]}
    return self._send(request)
def get_mixin_errors(self, node):
    """
    Check whether the mixins allocated to the node can be used. Every mixin
    node should be on the Meta, and every rule
    (attribute/pointer/set/aspect/containment/constraint) should be defined in
    only one mixin.

    :param node: the node to test.
    :type node: dict
    :returns: The array of violations; an empty array means no violation.
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMixinErrors', 'args': [node]}
    return self._send(request)
def get_mixin_nodes(self, node):
    """
    Collect the mixin nodes defined directly at the node.

    :param node: the node in question.
    :type node: dict
    :returns: The dictionary of the mixin nodes keyed by their paths.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMixinNodes', 'args': [node]}
    return self._send(request)
def get_mixin_paths(self, node):
    """
    Collect the paths of the mixin nodes defined directly at the node.

    :param node: the node in question.
    :type node: dict
    :returns: The paths of the mixins in an array, ordered by their order of
        use (important should a collision among definitions arise).
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getMixinPaths', 'args': [node]}
    return self._send(request)
def get_namespace(self, node):
    """
    Resolve the namespace of the node. A node outside any library yields the
    empty string; a node in a library of a library yields the library names
    joined by dots.

    :param node: the node in question.
    :type node: dict
    :returns: The namespace of the node.
    :rtype: str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getNamespace', 'args': [node]}
    return self._send(request)
def get_own_attribute(self, node, name):
    """
    Fetch the value of the attribute defined specifically for the node.

    :param node: the node in question.
    :type node: dict
    :param name: the attribute name.
    :type name: str
    :returns: The value of the attribute defined directly on the node.
        Undefined means no such attribute is defined directly for the node - it
        either inherits a value or has no such attribute at all.
    :rtype: str or int or float or bool or dict or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnAttribute', 'args': [node, name]}
    return self._send(request)
def get_own_attribute_names(self, node):
    """
    List the attribute names first defined on the node itself rather than on
    its bases.

    :param node: the node in question.
    :type node: dict
    :returns: An array of the names of the node's own attributes.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnAttributeNames', 'args': [node]}
    return self._send(request)
def get_own_children_paths(self, parent):
    """
    Collect the absolute paths of the children of the node that carry own data
    (not merely inherited).

    :param parent: the container node in question.
    :type parent: dict
    :returns: An array of the children's absolute paths.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnChildrenPaths', 'args': [parent]}
    return self._send(request)
def get_own_children_relids(self, node):
    """
    Collect the relative ids of the children of the node that carry own data
    (not merely inherited). N.B. Do not mutate the returned array!

    :param node: the container node in question.
    :type node: dict
    :returns: An array of the relative ids.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnChildrenRelids', 'args': [node]}
    return self._send(request)
def get_own_constraint_names(self, node):
    """
    List the constraint names defined specifically for the node.

    :param node: the node in question.
    :type node: dict
    :returns: An array of the names of the node's own constraints.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnConstraintNames', 'args': [node]}
    return self._send(request)
def get_own_json_meta(self, node):
    """
    Fetch the META rules defined specifically for the given node.

    :param node: the node in question.
    :type node: dict
    :returns: An object representing the META rules defined specifically for the node.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnJsonMeta', 'args': [node]}
    return self._send(request)
def get_own_member_paths(self, node, name):
    """
    List the absolute paths of the members of the named set of the node that
    are not simply inherited.

    :param node: the set owner.
    :type node: dict
    :param name: the name of the set.
    :type name: str
    :returns: An array of absolute path strings of the set members that have
        information on the node's own inheritance level.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnMemberPaths', 'args': [node, name]}
    return self._send(request)
def get_own_pointer_names(self, node):
    """
    List the names of the pointers defined specifically for the node.

    :param node: the node in question.
    :type node: dict
    :returns: An array of names of the node's own pointers.
    :rtype: list of str
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnPointerNames', 'args': [node]}
    return self._send(request)
def get_own_pointer_path(self, node, name):
    """
    Fetch the absolute path of the target of a pointer defined specifically
    for the node.

    :param node: the node in question
    :type node: dict
    :param name: the name of the pointer
    :type name: str
    :returns: The absolute path of the target. None means 'no-target' was set
        specifically on this node for the pointer; undefined means the node
        either inherits the target or has no such pointer at all.
    :rtype: str or None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'getOwnPointerPath', 'args': [node, name]}
    return self._send(request)
def get_own_registry(self, node, name):
"""
Returns the value of the registry entry defined for the given node.
:param node: the node in question.
:type node: dict
:param name: the name of the registry entry.
:type name: str
:returns: Returns the value of the registry entry defined specifically\
for the node. If undefined then it means that there is no such registry entry defined directly for the node,\
meaning that it either inherits some value or there is no such registry entry at all.
:rtype: str or int or float or bool or dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnRegistry', 'args': [node, name]})
def get_own_registry_names(self, node):
"""
Returns the names of the registry enrties of the node that have been first defined for the node\
and not for its bases.
:param node: the node in question.
:type node: dict
:returns: The function returns an array of the names of the own registry entries of the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnRegistryNames', 'args': [node]})
def get_own_set_attribute(self, node, set_name, attr_name):
"""
Get the value of the attribute entry specifically set for the set at the node.
:param node: the owner of the set.
:type node: dict
:param set_name: the name of the set.
:type set_name: str
:param attr_name: the name of the attribute entry.
:type attr_name: str
:returns: Return the value of the attribute. If it is undefined,\
then there is no such attribute at the set.
:rtype: str or int or float or bool or dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnSetAttribute', 'args': [node, set_name, attr_name]})
def get_own_set_attribute_names(self, node, name):
"""
Return the names of the attribute entries specifically set for the set at the node.
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:returns: Returns the array of names of attribute entries defined in the set at the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnSetAttributeNames', 'args': [node, name]})
def get_own_set_names(self, node):
"""
Returns the names of the sets created specifically at the node.\
N.B. When adding a member to a set of a node, the set is automatically created at the node.
:param node: the node in question.
:type node: dict
:returns: Returns an array of set names that were specifically created at the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnSetNames', 'args': [node]})
def get_own_set_registry(self, node, set_name, reg_name):
"""
Get the value of the registry entry specifically set for the set at the node.
:param node: the owner of the set.
:type node: dict
:param set_name: the name of the set.
:type set_name: str
:param reg_name: the name of the registry entry.
:type reg_name: str
:returns: Return the value of the registry. If it is undefined,\
then there is no such registry at the set.
:rtype: str or int or float or bool or dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnSetRegistry', 'args': [node, set_name, reg_name]})
def get_own_set_registry_names(self, node, name):
"""
Return the names of the registry entries specifically set for the set at the node.
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:returns: Returns the array of names of registry entries defined in the set at the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnSetRegistryNames', 'args': [node, name]})
def get_own_valid_aspect_names(self, node):
"""
Returns the list of the META defined aspect names of the node that were specifically defined for the node.
:param node: the node in question.
:type node: dict
:returns: The function returns the aspect names that are specifically defined for the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnValidAspectNames', 'args': [node]})
def get_own_valid_aspect_target_paths(self, node, name):
"""
Returns the paths of the meta nodes that are valid target members of the given aspect\
specifically defined for the node.
:param node: the node in question.
:type node: dict
:param name: the name of the aspec in question.
:type name: str
:returns: The paths of the meta nodes whose instances could be members of the aspect.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnValidAspectTargetPaths', 'args': [node, name]})
def get_own_valid_attribute_names(self, node):
"""
Returns the list of the META defined attribute names of the node that were specifically defined for the node.
:param node: the node in question.
:type node: dict
:returns: The function returns the attribute names that are defined specifically for the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnValidAttributeNames', 'args': [node]})
def get_own_valid_pointer_names(self, node):
"""
Returns the list of the META defined pointer names of the node that were specifically defined for the node.
:param node: the node in question.
:type node: dict
:returns: The function returns all the pointer names that are defined among the META\
rules of the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnValidPointerNames', 'args': [node]})
def get_own_valid_set_names(self, node):
"""
Returns the list of the META defined set names of the node that were specifically defined for the node.
:param node: the node in question.
:type node: dict
:returns: The function returns all the set names that are defined among the META rules of the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnValidSetNames', 'args': [node]})
def get_own_valid_target_paths(self, node, name):
"""
Returns the paths of Meta nodes that are possible targets of the given pointer/set introduced by the node.
:param node: the node in question.
:type node: dict
:param name: the name of pointer/set.
:type name: str
:returns: The function returns the paths of valid nodes.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getOwnValidTargetPaths', 'args': [node, name]})
def get_parent(self, node):
"""
Returns the parent of the node.
:param node: the node in question
:type node: dict
:returns: Returns the parent of the node or NULL if it has no parent.
:rtype: dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getParent', 'args': [node]})
def get_path(self, node):
"""
Returns the complete path of the node in the containment hierarchy.
:param node: the node in question.
:type node: dict
:returns: Returns a path string where each portion is a relative id and they are separated by '/'.\
The path can be empty as well if the node in question is the root itself, otherwise it should be a chain\
of relative ids from the root of the containment hierarchy.
:rtype: str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getPath', 'args': [node]})
def get_pointer_definition_info(self, node, name, target):
"""
Returns the meta nodes that introduce the given pointer relationship.
:param node: the node in question.
:type node: dict
:param name: the name of the pointer in question.
:type name: str
:param target: the target node.
:type target: dict
:returns: The owner and the target of the pointer meta-rule that makes target a\
valid target of the named pointer of node.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getPointerDefinitionInfo', 'args': [node, name, target]})
def get_pointer_meta(self, node, name):
"""
Return a JSON representation of the META rules regarding the given pointer/set of the given node.
:param node: the node in question.
:type node: dict
:param name: the name of the pointer/set.
:type name: str
:returns: The function returns a detailed JSON structure that\
represents the META rules regarding the given pointer/set of the node.
:rtype: dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getPointerMeta', 'args': [node, name]})
def get_pointer_names(self, node):
"""
Retrieves a list of the defined pointer names of the node.
:param node: the node in question.
:type node: dict
:returns: The function returns an array of the names of the pointers of the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getPointerNames', 'args': [node]})
def get_pointer_path(self, node, name):
"""
Retrieves the path of the target of the given pointer of the given node.
:param node: the node in question.
:type node: dict
:param name: the name of the pointer in question.
:type name: str
:returns: The function returns the absolute path of the target node\
if there is a valid target. It returns null if though the pointer is defined it does not have any\
valid target. Finally, it return undefined if there is no pointer defined for the node under the given name.
:rtype: str or None or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getPointerPath', 'args': [node, name]})
def get_registry(self, node, name):
"""
Retrieves the value of the given registry entry of the given node.
:param node: the node in question.
:type node: dict
:param name: the name of the registry entry.
:type name: str
:returns: The function returns the value of the registry entry\
of the node. The value can be an object or any primitive type. If the value is undefined that means\
the node do not have such attribute defined.
:rtype: str or int or float or bool or dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getRegistry', 'args': [node, name]})
def get_registry_names(self, node):
"""
Returns the names of the defined registry entries of the node.
:param node: the node in question.
:type node: dict
:returns: The function returns an array of the names of the registry entries of the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getRegistryNames', 'args': [node]})
def get_relid(self, node):
"""
Returns the parent-relative identifier of the node.
:param node: the node in question.
:type node: dict
:returns: Returns the last segment of the node path.
:rtype: str or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getRelid', 'args': [node]})
def get_root(self, node):
"""
Returns the root node of the containment tree that node is part of.
:param node: the node in question.
:type node: dict
:returns: Returns the root of the containment hierarchy (it can be the node itself).
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getRoot', 'args': [node]})
def get_set_attribute(self, node, set_name, attr_name):
"""
Get the value of the attribute entry in the set.
:param node: the owner of the set.
:type node: dict
:param set_name: the name of the set.
:type set_name: str
:param attr_name: the name of the attribute entry.
:type attr_name: str
:returns: Return the value of the attribute. If it is undefined,\
then there is no such attribute at the set.
:rtype: str or int or float or bool or dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getSetAttribute', 'args': [node, set_name, attr_name]})
def get_set_attribute_names(self, node, name):
"""
Return the names of the attribute entries for the set.
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:returns: Returns the array of names of attribute entries in the set.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getSetAttributeNames', 'args': [node, name]})
def get_set_definition_info(self, node, name, member):
"""
Returns the meta nodes that introduce the given set relationship.
:param node: the node in question.
:type node: dict
:param name: the name of the set in question.
:type name: str
:param member: the member.
:type member: dict
:returns: The owner and the target of the set meta-rule that makes member a\
valid member of the named set of node.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getSetDefinitionInfo', 'args': [node, name, member]})
def get_set_names(self, node):
"""
Returns the names of the sets of the node.
:param node: the node in question.
:type node: dict
:returns: Returns an array of set names that the node has.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getSetNames', 'args': [node]})
def get_set_registry(self, node, set_name, reg_name):
"""
Get the value of the registry entry in the set.
:param node: the owner of the set.
:type node: dict
:param set_name: the name of the set.
:type set_name: str
:param reg_name: the name of the registry entry.
:type reg_name: str
:returns: Return the value of the registry. If it is undefined,\
then there is no such registry at the set.
:rtype: str or int or float or bool or dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getSetRegistry', 'args': [node, set_name, reg_name]})
def get_set_registry_names(self, node, name):
"""
Return the names of the registry entries for the set.
:param node: the owner of the set.
:type node: dict
:param name: the name of the set.
:type name: str
:returns: Returns the array of names of registry entries in the set.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getSetRegistryNames', 'args': [node, name]})
def get_type_root(self, node):
"""
Returns the root of the inheritance chain (cannot be the node itself).
:param node: the node in question.
:type node: dict
:returns: Returns the root of the inheritance chain of the node. If returns null,\
that means the node in question is the root of the chain.
:rtype: dict or None
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getTypeRoot', 'args': [node]})
def get_valid_aspect_names(self, node):
"""
Returns the list of the META defined aspect names of the node.
:param node: the node in question.
:type node: dict
:returns: The function returns all the aspect names that are defined among the META rules of the\
node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidAspectNames', 'args': [node]})
def get_valid_aspect_target_paths(self, node, name):
"""
Returns the paths of the meta nodes that are valid target members of the given aspect.
:param node: the node in question.
:type node: dict
:param name: the name of the aspec in question.
:type name: str
:returns: The paths of the meta nodes whose instances could be members of the aspect.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidAspectTargetPaths', 'args': [node, name]})
def get_valid_attribute_names(self, node):
"""
Returns the list of the META defined attribute names of the node.
:param node: the node in question.
:type node: dict
:returns: The function returns all the attribute names that are defined among the META rules of the\
node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidAttributeNames', 'args': [node]})
def get_valid_children_meta_nodes(self, parameters):
"""
Retrieves the valid META nodes that can be base of a child of the node.
:param parameters: the input parameters of the query.
:type parameters: dict
:returns: The function returns a list of valid nodes that can be instantiated as a\
child of the node.
:rtype: list of dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidChildrenMetaNodes', 'args': [parameters]})
def get_valid_children_paths(self, node):
"""
Returns the list of absolute path of the valid children types of the node.
:param node: the node in question.
:type node: dict
:returns: The function returns an array of absolute paths of the nodes that was defined as valid\
children for the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidChildrenPaths', 'args': [node]})
def get_valid_pointer_names(self, node):
"""
Returns the list of the META defined pointer names of the node.
:param node: the node in question.
:type node: dict
:returns: The function returns all the pointer names that are defined among the META rules\
of the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidPointerNames', 'args': [node]})
def get_valid_set_elements_meta_nodes(self, parameters):
"""
Retrieves the valid META nodes that can be base of a member of the set of the node.
:param parameters: the input parameters of the query.
:type parameters: dict
:returns: The function returns a list of valid nodes that can be instantiated as a\
member of the set of the node.
:rtype: list of dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidSetElementsMetaNodes', 'args': [parameters]})
def get_valid_set_names(self, node):
"""
Returns the list of the META defined set names of the node.
:param node: the node in question.
:type node: dict
:returns: The function returns all the set names that are defined among the META rules of the node.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidSetNames', 'args': [node]})
def get_valid_target_paths(self, node, name):
"""
Returns the paths of Meta nodes that are possible targets of the given pointer/set.
:param node: the node in question.
:type node: dict
:param name: the name of pointer/set.
:type name: str
:returns: The function returns the paths of valid nodes.
:rtype: list of str
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'getValidTargetPaths', 'args': [node, name]})
def import_closure(self, node, closure_information):
"""
Imports the set of nodes in the closureInformation - that has the format created by\
[getClosureInformation]{@link Core#getClosureInformation} - as direct children of the parent node.\
All data necessary for importing the closure has to be imported beforehand!
:param node: the parent node where the closure will be imported.
:type node: dict
:param closure_information: the information about the closure.
:type closure_information: dict
:returns: If the closure cannot be imported the resulting error highlights the causes,\
otherwise a specific object will be returned that holds information about the closure.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'importClosure', 'args': [node, closure_information]})
def is_abstract(self, node):
"""
Checks if the node is abstract.
:param node: the node in question.
:type node: dict
:returns: The function returns true if the registry entry 'isAbstract' of the node if true hence\
the node is abstract.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isAbstract', 'args': [node]})
def is_connection(self, node):
"""
Check is the node is a connection-like node.
:param node: the node in question.
:type node: dict
:returns: Returns true if both the 'src' and 'dst' pointer are defined as valid for the node.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isConnection', 'args': [node]})
def is_empty(self, node):
"""
Checks if the node in question has some actual data.
:param node: the node in question.
:type node: dict
:returns: Returns true if the node is 'empty' meaning that it is not reserved by real data.\
Returns false if the node is exists and have some meaningful value.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isEmpty', 'args': [node]})
def is_fully_overridden_member(self, node, name, path):
"""
Checks if the member is completely overridden in the set of the node.
:param node: the node to test.
:type node: dict
:param name: the name of the set of the node.
:type name: str
:param path: the path of the member in question.
:type path: str
:returns: Returns true if the member exists in the base of the set, but was\
added to the given set as well, which means a complete override. If the set does not exist\
or the member do not have a 'base' member or just some property was overridden, the function returns\
false.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreIllegalOperationError: If the context of the operation is not allowed.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isFullyOverriddenMember', 'args': [node, name, path]})
def is_instance_of(self, node, base_node_or_path):
"""
Checks if the node is an instance of base.
:param node: the node in question.
:type node: dict
:param base_node_or_path: a potential base node (or its path) of the node
:type base_node_or_path: dict or str
:returns: Returns true if the base is on the inheritance chain of node.\
A node is considered to be an instance of itself here.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isInstanceOf', 'args': [node, base_node_or_path]})
def is_library_element(self, node):
"""
Returns true if the node in question is a library element..
:param node: the node in question.
:type node: dict
:returns: Returns true if your node is a library element, false otherwise.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isLibraryElement', 'args': [node]})
def is_library_root(self, node):
"""
Returns true if the node in question is a library root..
:param node: the node in question.
:type node: dict
:returns: Returns true if your node is a library root (even if it is embedded in other library),\
false otherwise.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isLibraryRoot', 'args': [node]})
def is_member_of(self, node):
"""
Returns all membership information of the given node.
:param node: the node in question
:type node: dict
:returns: Returns a dictionary where every the key of every entry is an absolute path of a set owner\
node. The value of each entry is an array with the set names in which the node can be found as a member.
:rtype: dict
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isMemberOf', 'args': [node]})
def is_meta_node(self, node):
"""
Checks if the node is a META node.
:param node: the node to test.
:type node: dict
:returns: Returns true if the node is a member of the METAAspectSet of the ROOT node hence can be\
seen as a META node.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isMetaNode', 'args': [node]})
def is_type_of(self, node, type_node_or_path):
"""
Checks if the given node in any way inherits from the typeNode. In addition to checking if the node\
"isInstanceOf" of typeNode, this methods also takes mixins into account.
:param node: the node in question.
:type node: dict
:param type_node_or_path: the type node we want to check or its path.
:type type_node_or_path: dict or str
:returns: The function returns true if the typeNodeOrPath represents a base node,\
or a mixin of any of the base nodes, of the node.\
Every node is considered to be a type of itself.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isTypeOf', 'args': [node, type_node_or_path]})
def is_valid_aspect_member_of(self, node, parent, name):
"""
Returns if a node could be contained in the given container's aspect.
:param node: the node in question.
:type node: dict
:param parent: the container node in question.
:type parent: dict
:param name: the name of aspect.
:type name: str
:returns: The function returns true if the given container could contain the node in the asked aspect.
:rtype: bool
:raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
:raises CoreInternalError: If some internal error took place inside the core layers.
"""
return self._send({'name': 'isValidAspectMemberOf', 'args': [node, parent, name]})
def is_valid_attribute_value_of(self, node, name, value):
    """
    Checks, according to the META rules, whether the given value has the type required\
    for the named attribute of the node.
    :param node: the node in question.
    :type node: dict
    :param name: the name of the attribute.
    :type name: str
    :param value: the value to test.
    :type value: str or int or float or bool or dict
    :returns: True if the value matches the META definitions.
    :rtype: bool
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'isValidAttributeValueOf', 'args': [node, name, value]}
    return self._send(request)
def is_valid_child_of(self, node, parent):
    """
    Checks if according to the META rules the given node can be a child of the parent.
    :param node: the node in question
    :type node: dict
    :param parent: the parent we like to test.
    :type parent: dict
    :returns: The function returns true if according to the META rules the node can be a child of the\
    parent. The check does not cover multiplicity (so if the parent can only have two children and it already\
    has them, this function will still return true).
    :rtype: bool
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'isValidChildOf', 'args': [node, parent]})
def is_valid_new_base(self, node, base):
    """
    Checks if base can be the new base of node.
    :param node: the node in question.
    :type node: dict
    :param base: the new base.
    :type base: dict or None
    :returns: True if the supplied base is a valid base for the node.
    :rtype: bool
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'isValidNewBase', 'args': [node, base]})
def is_valid_new_child(self, parent_node, base_node):
    """
    Checks whether an instance of the given base could be created under the parent.\
    META consistency is not checked; the call only validates that the proposed creation\
    would not introduce loops in the combined containment/inheritance trees.
    :param parent_node: the parent in question.
    :type parent_node: dict or None
    :param base_node: the intended type of the node.
    :type base_node: dict or None
    :returns: True if a child of the type can be created.
    :rtype: bool
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'isValidNewChild', 'args': [parent_node, base_node]}
    return self._send(request)
def is_valid_new_parent(self, node, parent):
    """
    Tells whether the given parent could become the new parent of the node.
    :param node: the node in question.
    :type node: dict
    :param parent: the new parent.
    :type parent: dict
    :returns: True if the supplied parent is a valid parent for the node.
    :rtype: bool
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'isValidNewParent', 'args': [node, parent]}
    return self._send(request)
def is_valid_target_of(self, node, source, name):
    """
    Checks, in accordance with the META rules, whether the node can be the target of the\
    named pointer of the source node.
    :param node: the node in question.
    :type node: dict
    :param source: the source to test.
    :type source: dict
    :param name: the name of the pointer.
    :type name: str
    :returns: True if, according to the META rules, the given node is a valid target of\
    the given pointer of the source.
    :rtype: bool
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'isValidTargetOf', 'args': [node, source, name]}
    return self._send(request)
def load_by_path(self, node, relative_path):
    """
    From the given starting node, it loads the path given as a series of relative ids (separated by '/')\
    and returns the node it finds at the end of the path. If there is no such node, the function\
    returns None.
    :param node: the starting node of our search.
    :type node: dict
    :param relative_path: the relative path - built by relative ids - of the node in question.
    :type relative_path: str
    :returns: the resulting node
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    return self._send({'name': 'loadByPath', 'args': [node, relative_path]})
def load_child(self, parent, relative_id):
    """
    Loads the child of the given parent pointed by the relative id. Behind the scenes, it means\
    that it actually loads the data pointed by a hash stored inside the parent under the given id\
    and wraps it in a node object which will be connected to the parent as a child in the containment\
    hierarchy. If there is no such relative id reserved, the call returns None.
    :param parent: the container node in question.
    :type parent: dict
    :param relative_id: the relative id of the child in question.
    :type relative_id: str
    :returns: the resulting child
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    return self._send({'name': 'loadChild', 'args': [parent, relative_id]})
def load_children(self, node):
    """
    Loads every child of the given parent. Because the call first inspects the parent's\
    already reserved relative ids, only the already existing children are loaded (no\
    on-demand empty node creation takes place).
    :param node: the container node in question.
    :type node: dict
    :returns: the resulting children
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    request = {'name': 'loadChildren', 'args': [node]}
    return self._send(request)
def load_collection(self, node, pointer_name):
    """
    Loads all the source nodes that have the given pointer and whose target is the given node.
    :param node: the target node in question.
    :type node: dict
    :param pointer_name: the name of the pointer of the sources.
    :type pointer_name: str
    :returns: the resulting sources
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    return self._send({'name': 'loadCollection', 'args': [node, pointer_name]})
def load_instances(self, node):
    """
    Loads every instance of the given node.
    :param node: the node in question.
    :type node: dict
    :returns: the found instances of the node.
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the status of the execution.
    :raises CoreInternalError: the status of the execution.
    """
    request = {'name': 'loadInstances', 'args': [node]}
    return self._send(request)
def load_members(self, node, set_name):
    """
    Loads every member of the named set of the node.
    :param node: the node in question.
    :type node: dict
    :param set_name: the name of the set in question.
    :type set_name: str
    :returns: the found members of the set of the node.
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the status of the execution.
    :raises CoreInternalError: the status of the execution.
    """
    request = {'name': 'loadMembers', 'args': [node, set_name]}
    return self._send(request)
def load_own_children(self, node):
    """
    Loads all the children of the given parent that have some own data and are not purely inherited.\
    As it first checks the already reserved relative ids of the parent, it only loads the already\
    existing children (so no on-demand empty node creation).
    :param node: the container node in question.
    :type node: dict
    :returns: the resulting children
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    return self._send({'name': 'loadOwnChildren', 'args': [node]})
def load_own_members(self, node, set_name):
    """
    Loads every own (not inherited) member of the named set of the node.
    :param node: the node in question.
    :type node: dict
    :param set_name: the name of the set in question.
    :type set_name: str
    :returns: the found own members of the set of the node.
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the status of the execution.
    :raises CoreInternalError: the status of the execution.
    """
    request = {'name': 'loadOwnMembers', 'args': [node, set_name]}
    return self._send(request)
def load_own_sub_tree(self, node):
    """
    Loads a complete sub-tree of the containment hierarchy starting from the given node, but loads only\
    those children that have some additional data and are not purely inherited.
    :param node: the container node in question.
    :type node: dict
    :returns: the resulting sources
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    return self._send({'name': 'loadOwnSubTree', 'args': [node]})
def load_pointer(self, node, pointer_name):
    """
    Loads the target of the given pointer of the given node. The returned node can have three kinds\
    of values:\
    if the node is valid, then it is the defined target of a valid pointer,\
    if the returned value is null, then the pointer is defined, but has no real target,\
    finally, if the returned value is undefined, then there is no such pointer defined for the given node.
    :param node: the source node in question.
    :type node: dict
    :param pointer_name: the name of the pointer.
    :type pointer_name: str
    :returns: the resulting target
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    return self._send({'name': 'loadPointer', 'args': [node, pointer_name]})
def load_root(self, hash):
    """
    Loads the data object identified by the given hash and makes it the root of a\
    containment hierarchy.
    :param hash: the hash of the data object we like to load as root.\
    (NOTE: the parameter name shadows the ``hash`` builtin, but it is kept for\
    backward compatibility with existing keyword-argument callers.)
    :type hash: str
    :returns: the resulting root node
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    request = {'name': 'loadRoot', 'args': [hash]}
    return self._send(request)
def load_sub_tree(self, node):
    """
    Loads the complete containment sub-tree rooted at the given node.
    :param node: the node that is the root of the sub-tree in question.
    :type node: dict
    :returns: the resulting sources
    :rtype: list of dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution
    :raises CoreInternalError: the result of the execution
    """
    request = {'name': 'loadSubTree', 'args': [node]}
    return self._send(request)
def move_aspect_meta_target(self, node, target, old_name, new_name):
    """
    Moves the given target definition over to a new aspect. As actual values in case of\
    relation definitions vary quite a bit from the meta-targets, this function does not deal with\
    the actual pointer/set target/members.
    :param node: the node in question.
    :type node: dict
    :param target: the target that should be moved among definitions.
    :type target: dict
    :param old_name: the current name of the aspect that has the target.
    :type old_name: str
    :param new_name: the new aspect name where the target should be moved over.
    :type new_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'moveAspectMetaTarget', 'args': [node, target, old_name, new_name]})
def move_member(self, node, member_path, old_set_name, new_set_name):
    """
    Moves an own member of the set over to another set of the node.
    :param node: the node in question.
    :type node: dict
    :param member_path: the path of the memberNode that should be moved.
    :type member_path: str
    :param old_set_name: the name of the set where the member currently resides.
    :type old_set_name: str
    :param new_set_name: the name of the target set where the member should be moved to.
    :type new_set_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'moveMember', 'args': [node, member_path, old_set_name, new_set_name]})
def move_node(self, node, parent):
    """
    Moves the given node under the given parent.
    :param node: the node to be moved.
    :type node: dict
    :param parent: the new parent node of the moved node.
    :type parent: dict
    :returns: The function returns the node after the move.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'moveNode', 'args': [node, parent]})
def move_pointer_meta_target(self, node, target, old_name, new_name):
    """
    Moves the given target definition over to another pointer or set definition.\
    Note that this does not alter the actual pointer target or set members.
    :param node: the node in question.
    :type node: dict
    :param target: the target that should be moved among definitions.
    :type target: dict
    :param old_name: the current name of the pointer/set definition in question.
    :type old_name: str
    :param new_name: the new name of the relation towards the target.
    :type new_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {
        'name': 'movePointerMetaTarget',
        'args': [node, target, old_name, new_name],
    }
    return self._send(request)
def persist(self, node):
    """
    Persists the changes made in memory and computes the data blobs that need to be saved into the database\
    to make the change and allow other users to see the new state of the project.
    :param node: some node element of the modified containment hierarchy (usually the root).
    :type node: dict
    :returns: The function returns an object which collects all the changes\
    on data level and necessary to update the database on server side. Keys of the returned object are 'rootHash'\
    and 'objects'. The values of these should be passed to project.makeCommit.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'persist', 'args': [node]})
def remove_library(self, node, name):
    """
    Removes a library from your project, together with any remaining instances of the\
    specific library.
    :param node: any node in your project.
    :type node: dict
    :param name: the name of your library.
    :type name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'removeLibrary', 'args': [node, name]}
    return self._send(request)
def rename_attribute(self, node, old_name, new_name):
    """
    Renames the given attribute of the node, provided its value is not inherited.
    :param node: the node in question.
    :type node: dict
    :param old_name: the current name of the attribute in question.
    :type old_name: str
    :param new_name: the new name of the attribute.
    :type new_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'renameAttribute', 'args': [node, old_name, new_name]}
    return self._send(request)
def rename_attribute_meta(self, node, old_name, new_name):
    """
    Renames the given attribute definition of the node. It also renames the default value of the definition!\
    As a result of this operation, all instances of node will have the new attribute, but if they have\
    overridden the old attribute it will remain under that name (and become meta invalid).
    :param node: the node in question.
    :type node: dict
    :param old_name: the current name of the attribute definition in question.
    :type old_name: str
    :param new_name: the new name of the attribute.
    :type new_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'renameAttributeMeta', 'args': [node, old_name, new_name]})
def rename_library(self, node, old_name, new_name):
    """
    Rename a library in your project.
    :param node: any node in your project.
    :type node: dict
    :param old_name: the current name of the library.
    :type old_name: str
    :param new_name: the new name of the library.
    :type new_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'renameLibrary', 'args': [node, old_name, new_name]})
def rename_pointer(self, node, old_name, new_name):
    """
    Renames the given pointer of the node, provided its target is not inherited.
    :param node: the node in question.
    :type node: dict
    :param old_name: the current name of the pointer in question.
    :type old_name: str
    :param new_name: the new name of the pointer.
    :type new_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'renamePointer', 'args': [node, old_name, new_name]}
    return self._send(request)
def rename_registry(self, node, old_name, new_name):
    """
    Renames the given registry entry of the node, provided its value is not inherited.
    :param node: the node in question.
    :type node: dict
    :param old_name: the current name of the registry in question.
    :type old_name: str
    :param new_name: the new name of the registry.
    :type new_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'renameRegistry', 'args': [node, old_name, new_name]}
    return self._send(request)
def rename_set(self, node, old_name, new_name):
    """
    Renames the given set of the node if it is not inherited.
    :param node: the node in question.
    :type node: dict
    :param old_name: the current name of the set in question.
    :type old_name: str
    :param new_name: the new name of the set.
    :type new_name: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'renameSet', 'args': [node, old_name, new_name]})
def set_aspect_meta_target(self, node, name, target):
    """
    Registers a valid type for the named aspect of the node.
    :param node: the node in question.
    :type node: dict
    :param name: the name of the aspect.
    :type name: str
    :param target: the valid type for the aspect.
    :type target: dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'setAspectMetaTarget', 'args': [node, name, target]}
    return self._send(request)
def set_attribute(self, node, name, value):
    """
    Sets the value of the given attribute of the given node. It defines the attribute on demand, meaning\
    that it will set the given attribute even if it was not defined for the node beforehand.
    :param node: the node in question.
    :type node: dict
    :param name: the name of the attribute.
    :type name: str
    :param value: the new value of the attribute, undefined is not allowed.
    :type value: str or int or float or bool or dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'setAttribute', 'args': [node, name, value]})
def set_attribute_meta(self, node, name, rule):
    """
    Defines the META rules of the named attribute of the node.
    :param node: the node in question.
    :type node: dict
    :param name: the name of the attribute.
    :type name: str
    :param rule: the rules that defines the attribute
    :type rule: dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'setAttributeMeta', 'args': [node, name, rule]}
    return self._send(request)
def set_base(self, node, base):
    """
    Sets the base node of the given node. The function doesn't touch the properties or the children of the node\
    so it can cause META rule violations that need to be corrected manually.
    :param node: the node in question.
    :type node: dict
    :param base: the new base.
    :type base: dict or None
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'setBase', 'args': [node, base]})
def set_child_meta(self, node, child, min=None, max=None):
    """
    Sets the given child as a valid child type for the node.
    :param node: the node in question.
    :type node: dict
    :param child: the valid child node.
    :type child: dict
    :param min: the allowed minimum number of children from this given node type (if not given or\
    -1 is set, then there will be no minimum rule regarding this child type)
    :type min: int
    :param max: the allowed maximum number of children from this given node type (if not given or\
    -1 is set, then there will be no maximum rule regarding this child type)
    :type max: int
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'setChildMeta', 'args': [node, child, min, max]})
def set_children_meta_limits(self, node, min=None, max=None):
    """
    Sets the global containment limits of the node.
    :param node: the node in question.
    :type node: dict
    :param min: the allowed minimum number of children (if not given or -1 is set,
        then there will be no minimum rule regarding children)
    :type min: int
    :param max: the allowed maximum number of children (if not given or -1 is set,
        then there will be no maximum rule regarding children)
    :type max: int
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'setChildrenMetaLimits', 'args': [node, min, max]}
    return self._send(request)
def set_constraint(self, node, name, constraint):
    """
    Attaches a named constraint object to the node.
    :param node: the node in question.
    :type node: dict
    :param name: the name of the constraint.
    :type name: str
    :param constraint: the constraint to be set.
    :type constraint: dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'setConstraint', 'args': [node, name, constraint]}
    return self._send(request)
def set_guid(self, node, guid):
    """
    Sets the GUID of a node. The Core itself does not check whether the GUID already exists, so the use of\
    this function is only advised during the creation of the node.
    :param node: the node in question.
    :type node: dict
    :param guid: the new globally unique identifier.
    :type guid: str
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the result of the execution.
    :raises CoreIllegalOperationError: the result of the execution.
    :raises CoreInternalError: the result of the execution.
    """
    return self._send({'name': 'setGuid', 'args': [node, guid]})
def set_member_attribute(self, node, set_name, path, attr_name, value):
    """
    Sets an attribute value that represents a property of the set membership.
    :param node: the owner of the set.
    :type node: dict
    :param set_name: the name of the set.
    :type set_name: str
    :param path: the absolute path of the member node.
    :type path: str
    :param attr_name: the name of the attribute.
    :type attr_name: str
    :param value: the new value of the attribute.
    :type value: str or int or float or bool or dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {
        'name': 'setMemberAttribute',
        'args': [node, set_name, path, attr_name, value],
    }
    return self._send(request)
def set_member_registry(self, node, set_name, path, reg_name, value):
    """
    Sets a registry entry value that represents a property of the set membership.
    :param node: the owner of the set.
    :type node: dict
    :param set_name: the name of the set.
    :type set_name: str
    :param path: the absolute path of the member node.
    :type path: str
    :param reg_name: the name of the registry entry.
    :type reg_name: str
    :param value: the new value of the registry.
    :type value: str or int or float or bool or dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {
        'name': 'setMemberRegistry',
        'args': [node, set_name, path, reg_name, value],
    }
    return self._send(request)
def set_pointer(self, node, name, target):
    """
    Sets the target of the named pointer of the node.
    :param node: the node in question.
    :type node: dict
    :param name: the name of the pointer in question.
    :type name: str
    :param target: the new target of the pointer.
    :type target: dict or None
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    request = {'name': 'setPointer', 'args': [node, name, target]}
    return self._send(request)
def set_pointer_meta_limits(self, node, name, min=None, max=None):
    """
    Sets the global target limits for pointer/set of the node. On META level the only distinction between\
    pointer and sets is the global multiplicity which has to maximize the number of possible targets to 1 in\
    case of 'pure' pointer definitions.
    :param node: the node in question.
    :type node: dict
    :param name: the name of the pointer/set.
    :type name: str
    :param min: the allowed minimum number of targets (if not given or\
    -1 is set, then there will be no minimum rule regarding targets)
    :type min: int
    :param max: the allowed maximum number of targets (if not given or\
    -1 is set, then there will be no maximum rule regarding targets)
    :type max: int
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'setPointerMetaLimits', 'args': [node, name, min, max]})
def set_pointer_meta_target(self, node, name, target, min=None, max=None):
    """
    Sets the given target as a valid target type for the pointer/set of the node.
    :param node: the node in question.
    :type node: dict
    :param name: the name of the pointer/set.
    :type name: str
    :param target: the valid target/member node.
    :type target: dict
    :param min: the allowed minimum number of target/member from this given node type (if not\
    given or -1 is set, then there will be no minimum rule regarding this target type)
    :type min: int
    :param max: the allowed maximum number of target/member from this given node type (if not\
    given or -1 is set, then there will be no maximum rule regarding this target type)
    :type max: int
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'setPointerMetaTarget', 'args': [node, name, target, min, max]})
def set_registry(self, node, name, value):
    """
    Sets the value of the given registry entry of the given node. It defines the registry
    entry on demand, meaning that it will set the given registry entry even if it was not
    defined for the node beforehand.

    :param node: the node in question.
    :type node: dict
    :param name: the name of the registry entry.
    :type name: str
    :param value: the new value of the registry entry. Can be any primitive
        type or object. Undefined is not allowed.
    :type value: str or int or float or bool or dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'setRegistry', 'args': [node, name, value]})
def set_set_attribute(self, node, set_name, attr_name, value):
    """
    Sets the attribute entry value for the set at the node.

    :param node: the owner of the set.
    :type node: dict
    :param set_name: the name of the set.
    :type set_name: str
    :param attr_name: the name of the attribute entry.
    :type attr_name: str
    :param value: the new value of the attribute.
    :type value: str or int or float or bool or dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    # Forward the request to the underlying core layer as a single message.
    message = {
        'name': 'setSetAttribute',
        'args': [node, set_name, attr_name, value],
    }
    return self._send(message)
def set_set_registry(self, node, set_name, reg_name, value):
    """
    Sets the registry entry value for the set at the node.

    :param node: the owner of the set.
    :type node: dict
    :param set_name: the name of the set.
    :type set_name: str
    :param reg_name: the name of the registry entry.
    :type reg_name: str
    :param value: the new value of the registry.
    :type value: str or int or float or bool or dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreIllegalOperationError: If the context of the operation is not allowed.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    # Bundle the positional arguments and dispatch to the core layer.
    payload = [node, set_name, reg_name, value]
    return self._send({'name': 'setSetRegistry', 'args': payload})
def try_to_concat_changes(self, mine, theirs):
    """
    Tries to merge two patch objects. The patches ideally represent changes made by two
    parties. They represent changes from the same source ending in different states. Our
    aim is to generate a single patch that could cover the changes of both parties.

    :param mine: the tree structured JSON patch that represents my changes.
    :type mine: dict
    :param theirs: the tree structured JSON patch that represents the changes of the other party.
    :type theirs: dict
    :returns: The function returns with an object that contains the conflicts (if any) and
        the merged patch.
    :rtype: dict
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises CoreInternalError: If some internal error took place inside the core layers.
    """
    return self._send({'name': 'tryToConcatChanges', 'args': [mine, theirs]})
def update_library(self, node, name, library_root_hash, library_info=None):
    """
    It updates a library in your project based on the input information. It will 'replace'
    the old version, keeping as much information as possible regarding the instances.

    :param node: any regular node in your project.
    :type node: dict
    :param name: the name of the library you want to update.
    :type name: str
    :param library_root_hash: the hash of your library's new root
        (must exist in the project's collection at the time of call).
    :type library_root_hash: str
    :param library_info: information about your project.
    :type library_info: dict
    :returns: Nothing is returned by the function.
    :rtype: None
    :raises CoreIllegalArgumentError: If some of the parameters don't match the input criteria.
    :raises JSError: the status of the execution.
    :raises CoreIllegalOperationError: the status of the execution.
    :raises CoreInternalError: the status of the execution.
    """
    return self._send({'name': 'updateLibrary', 'args': [node, name, library_root_hash, library_info]})
| 51.047572
| 542
| 0.646202
| 20,132
| 153,449
| 4.879446
| 0.041129
| 0.040058
| 0.023454
| 0.034998
| 0.807193
| 0.781021
| 0.759958
| 0.743396
| 0.727749
| 0.716266
| 0
| 0.000082
| 0.284277
| 153,449
| 3,005
| 543
| 51.064559
| 0.894352
| 0.690249
| 0
| 0
| 0
| 0
| 0.165787
| 0.026021
| 0
| 0
| 0
| 0
| 0
| 1
| 0.491139
| false
| 0
| 0.005063
| 0
| 0.987342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
2805d45725a936cc86375d23d80444da219b7eea
| 354
|
py
|
Python
|
twitch_info/__init__.py
|
feytus/twitch-info
|
29a3da3d37ac221d3edb1bc38875ec20cacfd660
|
[
"MIT"
] | null | null | null |
twitch_info/__init__.py
|
feytus/twitch-info
|
29a3da3d37ac221d3edb1bc38875ec20cacfd660
|
[
"MIT"
] | null | null | null |
twitch_info/__init__.py
|
feytus/twitch-info
|
29a3da3d37ac221d3edb1bc38875ec20cacfd660
|
[
"MIT"
] | null | null | null |
from twitch_info.twitch_info import get_stream
from twitch_info.twitch_info import get_user_id
from twitch_info.twitch_info import get_access_token
from twitch_info.twitch_info import InvalidClient
from twitch_info.twitch_info import InvalidOAuthToken
from twitch_info.twitch_info import InvalidUser
from twitch_info.twitch_info import ValuesNotMatching
| 44.25
| 53
| 0.90113
| 54
| 354
| 5.555556
| 0.259259
| 0.466667
| 0.326667
| 0.466667
| 0.73
| 0.73
| 0.33
| 0
| 0
| 0
| 0
| 0
| 0.079096
| 354
| 8
| 54
| 44.25
| 0.920245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e6339908f746d7019ee114d60142b86e58c106b2
| 298
|
py
|
Python
|
pyBN/learning/structure/constraint/__init__.py
|
seuzmj/pyBN
|
ce7b6823f4e6c4f6f9b77e89f05de87ed486b349
|
[
"MIT"
] | 126
|
2016-01-17T22:59:08.000Z
|
2021-12-19T15:35:22.000Z
|
pyBN/learning/structure/constraint/__init__.py
|
levilentz/pyBN
|
ce7b6823f4e6c4f6f9b77e89f05de87ed486b349
|
[
"MIT"
] | 24
|
2016-01-21T20:11:03.000Z
|
2018-09-21T01:23:58.000Z
|
pyBN/learning/structure/constraint/__init__.py
|
levilentz/pyBN
|
ce7b6823f4e6c4f6f9b77e89f05de87ed486b349
|
[
"MIT"
] | 55
|
2016-05-27T00:46:54.000Z
|
2022-03-24T11:43:57.000Z
|
from pyBN.learning.structure.constraint.fast_iamb import *
from pyBN.learning.structure.constraint.grow_shrink import *
from pyBN.learning.structure.constraint.iamb import *
from pyBN.learning.structure.constraint.lambda_iamb import *
from pyBN.learning.structure.constraint.path_condition import *
| 59.6
| 63
| 0.852349
| 39
| 298
| 6.410256
| 0.333333
| 0.16
| 0.32
| 0.5
| 0.844
| 0.704
| 0.54
| 0
| 0
| 0
| 0
| 0
| 0.063758
| 298
| 5
| 63
| 59.6
| 0.896057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
e6643a1835f7cd88e0c0262a175a67efcef2b73b
| 978
|
py
|
Python
|
maxout.py
|
federicobergamin/Variational-Inference-with-Normalizing-Flows
|
09c3702a4ae04d044bc9bfefa20de5078a44caab
|
[
"MIT"
] | 16
|
2019-12-23T12:12:07.000Z
|
2022-03-07T08:29:51.000Z
|
maxout.py
|
federicobergamin/Variational-Inference-with-Normalizing-Flows
|
09c3702a4ae04d044bc9bfefa20de5078a44caab
|
[
"MIT"
] | 1
|
2020-06-23T05:21:07.000Z
|
2020-06-23T08:01:50.000Z
|
maxout.py
|
federicobergamin/Variational-Inference-with-Normalizing-Flows
|
09c3702a4ae04d044bc9bfefa20de5078a44caab
|
[
"MIT"
] | 4
|
2020-10-01T07:15:15.000Z
|
2022-03-03T10:59:23.000Z
|
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.autograd import Function
# class Maxout(nn.Module):
# def __init__(self, pool_size):
# super().__init__()
# self._pool_size = pool_size
#
# def forward(self, x):
# assert x.shape[-1] % self._pool_size == 0, \
# 'Wrong input last dim size ({}) for Maxout({})'.format(x.shape[-1], self._pool_size)
# m, i = x.view(*x.shape[:-1], x.shape[-1] // self._pool_size, self._pool_size).max(-1)
# return m
class Maxout(nn.Module):
    """Maxout activation over the channel dimension (dim 1).

    Splits dim 1 into groups of ``pool_size`` consecutive channels and keeps
    the maximum of each group, reducing the channel count by ``pool_size``.
    Any trailing dimensions (e.g. spatial dims) are preserved.
    """

    def __init__(self, pool_size):
        """:param pool_size: number of channels pooled into one output channel."""
        super().__init__()
        self._pool_size = pool_size

    def forward(self, x):
        # The channel dimension (dim 1) must split evenly into groups of pool_size.
        # (Original message said "last dim", which contradicted the dim-1 check.)
        assert x.shape[1] % self._pool_size == 0, \
            'Wrong input channel dim size ({}) for Maxout({})'.format(x.shape[1], self._pool_size)
        grouped = x.view(*x.shape[:1], x.shape[1] // self._pool_size, self._pool_size, *x.shape[2:])
        # .values avoids the unused-indices tuple unpack of the original.
        return grouped.max(2).values
| 34.928571
| 104
| 0.603272
| 148
| 978
| 3.716216
| 0.25
| 0.203636
| 0.261818
| 0.12
| 0.756364
| 0.756364
| 0.756364
| 0.756364
| 0.756364
| 0.756364
| 0
| 0.017333
| 0.233129
| 978
| 28
| 105
| 34.928571
| 0.716
| 0.4182
| 0
| 0
| 0
| 0
| 0.080501
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.142857
| false
| 0
| 0.357143
| 0
| 0.642857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
0545ba2c786ef6af441e9067891282855b52b8e4
| 218,903
|
py
|
Python
|
mp/utils/generate_labels.py
|
MECLabTUDA/QA_med_data
|
72897cb2d8e520dde6b88318c23bca32eb9210d7
|
[
"MIT"
] | null | null | null |
mp/utils/generate_labels.py
|
MECLabTUDA/QA_med_data
|
72897cb2d8e520dde6b88318c23bca32eb9210d7
|
[
"MIT"
] | null | null | null |
mp/utils/generate_labels.py
|
MECLabTUDA/QA_med_data
|
72897cb2d8e520dde6b88318c23bca32eb9210d7
|
[
"MIT"
] | null | null | null |
import os
import json
def generate_train_labels(num_intensities, source_path, target_path, swap_labels=False):
r"""This function generates the labels.json file that is necessary for training."""
# Foldernames are patient_id
filenames = [x for x in os.listdir(source_path) if '._' not in x and 'Decathlon' in x\
and not 'blur' in x and not 'resolution' in x and not 'ghosting' in x and not 'motion' in x\
and not 'noise' in x and not 'spike' in x]
# Generate labels for Decathlon with augmentation
labels = dict()
for name in filenames:
labels[str(name)] = 5/num_intensities
labels[str(name) + '_blur4'] = 4/num_intensities
labels[str(name) + '_blur3'] = 3/num_intensities
labels[str(name) + '_blur2'] = 2/num_intensities
labels[str(name) + '_blur1'] = 1/num_intensities
labels[str(name) + '_resolution4'] = 4/num_intensities
labels[str(name) + '_resolution3'] = 3/num_intensities
labels[str(name) + '_resolution2'] = 2/num_intensities
labels[str(name) + '_resolution1'] = 1/num_intensities
labels[str(name) + '_ghosting4'] = 4/num_intensities
labels[str(name) + '_ghosting3'] = 3/num_intensities
labels[str(name) + '_ghosting2'] = 2/num_intensities
labels[str(name) + '_ghosting1'] = 1/num_intensities
labels[str(name) + '_motion4'] = 4/num_intensities
labels[str(name) + '_motion3'] = 3/num_intensities
labels[str(name) + '_motion2'] = 2/num_intensities
labels[str(name) + '_motion1'] = 1/num_intensities
labels[str(name) + '_noise4'] = 4/num_intensities
labels[str(name) + '_noise3'] = 3/num_intensities
labels[str(name) + '_noise2'] = 2/num_intensities
labels[str(name) + '_noise1'] = 1/num_intensities
labels[str(name) + '_spike4'] = 4/num_intensities
labels[str(name) + '_spike3'] = 3/num_intensities
labels[str(name) + '_spike2'] = 2/num_intensities
labels[str(name) + '_spike1'] = 1/num_intensities
# Add GC labels (defined by hand --> do not delete) to labels dict --> for uncropped images!
labels['GC_Corona_volume-covid19-A-0003'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0003'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0003'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0003'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0003'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0003'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0011'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0011'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0011'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0011'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0011'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0011'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0013'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0013'+'_resolution'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0013'+'_ghosting'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0013'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0013'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0013'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0014'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0014'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0014'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0014'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0014'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0014'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0016'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0016'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0016'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0016'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0016'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0016'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0025'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0025'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0025'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0025'+'_motion'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0025'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0025'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0031'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0031'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0031'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0031'+'_motion'] = 2/num_intensities
labels['GC_Corona_volume-covid19-A-0031'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0031'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0034'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0034'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0034'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0034'+'_motion'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0034'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0034'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0038'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0038'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0038'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0038'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0038'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0038'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0039'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0039'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0039'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0039'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0039'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0039'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0041'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0041'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0041'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0041'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0041'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0041'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0044'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0044'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0044'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0044'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0044'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0044'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0046'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0046'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0046'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0046'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0046'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0046'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0047_1'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0047_1'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0047_1'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0047_1'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0047_1'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0047_1'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0053'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0053'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0053'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0053'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0053'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0053'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0054'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0054'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0054'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0054'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0054'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0054'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0066'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0066'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0066'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0066'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0066'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0066'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0070'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0070'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0070'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0070'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0070'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0070'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0072'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0072'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0072'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0072'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0072'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0072'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0073'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0073'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0073'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0073'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0073'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0073'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0074_1'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0074_1'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0074_1'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0074_1'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0074_1'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0074_1'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0083'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0083'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0083'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0083'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0083'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0083'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0090'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0090'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0090'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0090'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0090'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0090'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0092'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0092'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0092'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0092'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0092'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0092'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0096'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0096'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0096'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0096'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0096'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0096'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0106'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0106'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0106'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0106'+'_motion'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0106'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0106'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0110'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0110'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0110'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0110'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0110'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0110'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0112'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0112'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0112'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0112'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0112'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0112'+'_spike'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0114'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0114'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0114'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0114'+'_motion'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0114'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0114'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0120'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0120'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0120'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0120'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0120'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0120'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0129'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0129'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0129'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0129'+'_motion'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0129'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0129'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0130'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0130'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0130'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0130'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0130'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0130'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0133'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0133'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0133'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0133'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0133'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0133'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0147'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0147'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0147'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0147'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0147'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0147'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0151'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0151'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0151'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0151'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0151'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0151'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0154'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0154'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0154'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0154'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0154'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0154'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0165'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0165'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0165'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0165'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0165'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0165'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0167_1'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0167_1'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0167_1'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0167_1'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0167_1'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0167_1'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0173'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0173'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0173'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0173'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0173'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0173'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0178'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0178'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0178'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0178'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0178'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0178'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0179'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0179'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0179'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0179'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0179'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0179'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0181'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0181'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0181'+'_ghosting'] = 5/num_intensities
# Subjective image-quality ratings for GC_Corona CT volumes, each stored in
# ``labels`` as ``score / num_intensities`` (``labels`` and ``num_intensities``
# are defined earlier in this file).  The original code spelled out one
# assignment statement per (volume, metric) pair; the ratings are kept here as
# a data table instead and expanded by a loop, which produces the exact same
# keys, values, and insertion order.
#
# Per-volume rating order: (blur, resolution, ghosting, motion, noise, spike).
_QUALITY_METRICS = ('blur', 'resolution', 'ghosting', 'motion', 'noise', 'spike')

# NOTE(review): volume covid19-A-0181 is split across this section's boundary —
# its blur/resolution/ghosting entries are presumably assigned earlier in the
# file — so only its remaining three metrics are set here.
for _metric, _score in (('motion', 5), ('noise', 5), ('spike', 5)):
    labels['GC_Corona_volume-covid19-A-0181_' + _metric] = _score / num_intensities

# Ratings per volume, in the original assignment order.
_GC_CORONA_RATINGS = (
    ('covid19-A-0187',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0196_0', (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0199',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0201',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0202_0', (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0214',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0215',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0228',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0233',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0236',   (2, 5, 5, 3, 5, 5)),
    ('covid19-A-0237',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0239',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0246',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0247',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0251',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0252',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0255',   (5, 5, 5, 5, 5, 5)),
    ('covid19-A-0256_1', (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0263',   (2, 5, 5, 4, 5, 5)),
    ('covid19-A-0264',   (5, 5, 5, 5, 5, 5)),
    ('covid19-A-0267',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0270',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0282',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0285',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0288',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0295',   (2, 5, 5, 3, 5, 5)),
    ('covid19-A-0296',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0299',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0301',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0307',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0313',   (4, 5, 5, 4, 5, 5)),
    ('covid19-A-0314',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0315',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0316',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0320',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0323',   (3, 5, 5, 4, 5, 3)),
    ('covid19-A-0329',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0331',   (5, 5, 5, 5, 5, 5)),
    ('covid19-A-0332',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0338',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0339',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0342',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0347',   (4, 5, 5, 4, 5, 5)),
    ('covid19-A-0351',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0354',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0355',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0360',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0361',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0366',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0372',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0377',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0380',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0382',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0383_1', (4, 3, 5, 5, 5, 5)),
    ('covid19-A-0386',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0387',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0388',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0391',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0392',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0394',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0397',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0400',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0402',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0407',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0413',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0414',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0416',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0417',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0418',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0421',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0422',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0423',   (3, 5, 5, 5, 5, 4)),
    ('covid19-A-0435',   (3, 5, 5, 5, 5, 5)),
    ('covid19-A-0437',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0443',   (5, 5, 5, 5, 5, 5)),
    ('covid19-A-0445',   (4, 5, 5, 4, 5, 5)),
    ('covid19-A-0455',   (4, 5, 5, 4, 5, 5)),
    ('covid19-A-0462',   (5, 3, 5, 5, 5, 5)),
    ('covid19-A-0463',   (4, 5, 5, 5, 5, 5)),
    ('covid19-A-0464',   (3, 5, 5, 4, 5, 5)),
    ('covid19-A-0472',   (5, 5, 5, 5, 5, 5)),
    ('covid19-A-0473',   (4, 5, 5, 5, 5, 5)),
)

# Expand the table into individual label entries (same keys/values as before).
for _volume, _scores in _GC_CORONA_RATINGS:
    for _metric, _score in zip(_QUALITY_METRICS, _scores):
        labels['GC_Corona_volume-' + _volume + '_' + _metric] = _score / num_intensities

# NOTE(review): volume covid19-A-0475 is likewise split across this section's
# boundary — its motion/noise/spike entries presumably follow later in the
# file — so only its first three metrics are set here.
for _metric, _score in (('blur', 3), ('resolution', 5), ('ghosting', 5)):
    labels['GC_Corona_volume-covid19-A-0475_' + _metric] = _score / num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0511'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0511'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0511'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0511'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0511'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0511'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0521'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0521'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0521'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0521'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0521'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0521'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0522'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0522'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0522'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0522'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0522'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0522'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0524'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0524'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0524'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0524'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0524'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0524'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0525'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0525'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0525'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0525'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0525'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0525'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0526'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0526'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0526'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0526'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0526'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0526'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0530'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0530'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0530'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0530'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0530'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0530'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0531'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0531'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0531'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0531'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0531'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0531'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0534'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0534'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0534'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0534'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0534'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0534'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0537'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0537'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0537'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0537'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0537'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0537'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0548'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0548'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0548'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0548'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0548'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0548'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0553'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0553'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0553'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0553'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0553'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0553'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0557'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0557'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0557'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0557'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0557'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0557'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0559'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0559'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0559'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0559'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0559'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0559'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0560'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0560'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0560'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0560'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0560'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0560'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0562'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0562'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0562'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0562'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0562'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0562'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0567'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0567'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0567'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0567'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0567'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0567'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0569'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0569'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0569'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0569'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0569'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0569'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0570'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0570'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0570'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0570'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0570'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0570'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0573'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0573'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0573'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0573'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0573'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0573'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0575'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0575'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0575'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0575'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0575'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0575'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0576'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0576'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0576'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0576'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0576'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0576'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0579'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0579'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0579'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0579'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0579'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0579'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0581'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0581'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0581'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0581'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0581'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0581'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0585'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0585'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0585'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0585'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0585'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0585'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0586'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0586'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0586'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0586'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0586'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0586'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0589'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0589'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0589'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0589'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0589'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0589'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0590'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0590'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0590'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0590'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0590'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0590'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0599'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0599'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0599'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0599'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0599'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0599'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0600'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0600'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0600'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0600'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0600'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0600'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0604'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0604'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0604'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0604'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0604'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0604'+'_spike'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0612'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0612'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0612'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0612'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0612'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0612'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0614'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0614'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0614'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0614'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0614'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0614'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0623'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0623'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0623'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0623'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0623'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0623'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0626'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0626'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0626'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0626'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0626'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0626'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0627'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0627'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0627'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0627'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0627'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0627'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0629'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0629'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0629'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0629'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0629'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0629'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0635'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0635'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0635'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0635'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0635'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0635'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0636'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0636'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0636'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0636'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0636'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0636'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0638'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0638'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0638'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0638'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0638'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0638'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0643'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0643'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0643'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0643'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0643'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0643'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0648'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0648'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0648'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0648'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0648'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0648'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0652'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0652'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0652'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0652'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0652'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0652'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0656'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0656'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0656'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0656'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0656'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0656'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0657'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0657'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0657'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0657'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0657'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0657'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0658'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0658'+'_resolution'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0658'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0658'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0658'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0658'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0659'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0659'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0659'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0659'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0659'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0659'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0660'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0660'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0660'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0660'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0660'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0660'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0665'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0665'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0665'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0665'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0665'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0665'+'_spike'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0666'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0666'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0666'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0666'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0666'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0666'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0669'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0669'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0669'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0669'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0669'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0669'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0670'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0670'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0670'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0670'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0670'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0670'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0678'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0678'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0678'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0678'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0678'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0678'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0685'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0685'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0685'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0685'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0685'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0685'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0686'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0686'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0686'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0686'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0686'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0686'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0694'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0694'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0694'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0694'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0694'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0694'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0696'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0696'+'_resolution'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0696'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0696'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0696'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0696'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0698'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0698'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0698'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0698'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0698'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0698'+'_spike'] = 5/num_intensities
# Foldernames are patient_id. Keep only the GC_Corona volume folders,
# skipping macOS 'DS_Store' metadata entries and any folders that are
# already-augmented artefact outputs (their names carry an artefact tag).
_artefact_tags = ('blur', 'resolution', 'ghosting', 'motion', 'noise', 'spike')
filenames_GC = [x for x in os.listdir(source_path)
                if 'DS_Store' not in x and 'GC_Corona' in x
                and not any(tag in x for tag in _artefact_tags)]
# Generate labels for GC augmentation
for name in filenames_GC:
# Extract corresponding values
blur_value = labels[str(name)+'_blur'] * num_intensities
resolution_value = labels[str(name)+'_resolution'] * num_intensities
ghosting_value = labels[str(name)+'_ghosting'] * num_intensities
motion_value = labels[str(name)+'_motion'] * num_intensities
noise_value = labels[str(name)+'_noise'] * num_intensities
spike_value = labels[str(name)+'_spike'] * num_intensities
# Augmented blurred images
labels[str(name) + '_blur4_blur'] = max(1/num_intensities, (blur_value - 1)/num_intensities) # Good quality image --> blur_4
labels[str(name) + '_blur4_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_blur4_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_blur4_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_blur4_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_blur4_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_blur3_blur'] = max(1/num_intensities, (blur_value - 2)/num_intensities) # blur_3
labels[str(name) + '_blur3_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_blur3_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_blur3_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_blur3_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_blur3_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_blur2_blur'] = max(1/num_intensities, (blur_value - 3)/num_intensities) # blur_2
labels[str(name) + '_blur2_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_blur2_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_blur2_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_blur2_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_blur2_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_blur1_blur'] = max(1/num_intensities, (blur_value - 4)/num_intensities) # Bad quality image --> blur_1
labels[str(name) + '_blur1_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_blur1_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_blur1_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_blur1_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_blur1_spike'] = labels[str(name)+'_spike']
# Augmented downsampled images
labels[str(name) + '_resolution4_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_resolution4_resolution'] = max(1/num_intensities, (resolution_value - 1)/num_intensities) # Good quality image --> resolution_4
labels[str(name) + '_resolution4_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_resolution4_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_resolution4_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_resolution4_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_resolution3_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_resolution3_resolution'] = max(1/num_intensities, (resolution_value - 2)/num_intensities) # resolution_3
labels[str(name) + '_resolution3_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_resolution3_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_resolution3_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_resolution3_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_resolution2_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_resolution2_resolution'] = max(1/num_intensities, (resolution_value - 3)/num_intensities) # resolution_2
labels[str(name) + '_resolution2_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_resolution2_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_resolution2_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_resolution2_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_resolution1_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_resolution1_resolution'] = max(1/num_intensities, (resolution_value - 4)/num_intensities) # Bad quality image --> resolution_1
labels[str(name) + '_resolution1_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_resolution1_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_resolution1_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_resolution1_spike'] = labels[str(name)+'_spike']
# Augmented ghosted images
labels[str(name) + '_ghosting4_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_ghosting4_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_ghosting4_ghosting'] = max(1/num_intensities, (ghosting_value - 1)/num_intensities) # Good quality image --> ghosting_4
labels[str(name) + '_ghosting4_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_ghosting4_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_ghosting4_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_ghosting3_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_ghosting3_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_ghosting3_ghosting'] = max(1/num_intensities, (ghosting_value - 2)/num_intensities) # ghosting_3
labels[str(name) + '_ghosting3_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_ghosting3_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_ghosting3_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_ghosting2_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_ghosting2_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_ghosting2_ghosting'] = max(1/num_intensities, (ghosting_value - 3)/num_intensities) # ghosting_2
labels[str(name) + '_ghosting2_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_ghosting2_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_ghosting2_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_ghosting1_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_ghosting1_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_ghosting1_ghosting'] = max(1/num_intensities, (ghosting_value - 4)/num_intensities) # Bad quality image --> ghosting_1
labels[str(name) + '_ghosting1_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_ghosting1_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_ghosting1_spike'] = labels[str(name)+'_spike']
# Augmented motion images
labels[str(name) + '_motion4_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_motion4_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_motion4_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_motion4_motion'] = max(1/num_intensities, (motion_value - 1)/num_intensities) # Good quality image --> motion_4
labels[str(name) + '_motion4_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_motion4_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_motion3_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_motion3_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_motion3_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_motion3_motion'] = max(1/num_intensities, (motion_value - 2)/num_intensities) # motion_3
labels[str(name) + '_motion3_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_motion3_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_motion2_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_motion2_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_motion2_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_motion2_motion'] = max(1/num_intensities, (motion_value - 3)/num_intensities) # motion_2
labels[str(name) + '_motion2_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_motion2_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_motion1_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_motion1_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_motion1_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_motion1_motion'] = max(1/num_intensities, (motion_value - 4)/num_intensities) # Good quality image --> motion_1
labels[str(name) + '_motion1_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_motion1_spike'] = labels[str(name)+'_spike']
# Augmented noise images
labels[str(name) + '_noise4_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_noise4_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_noise4_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_noise4_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_noise4_noise'] = max(1/num_intensities, (noise_value - 1)/num_intensities) # Good quality image --> noise_4
labels[str(name) + '_noise4_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_noise3_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_noise3_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_noise3_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_noise3_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_noise3_noise'] = max(1/num_intensities, (noise_value - 2)/num_intensities) # noise_3
labels[str(name) + '_noise3_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_noise2_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_noise2_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_noise2_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_noise2_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_noise2_noise'] = max(1/num_intensities, (noise_value - 3)/num_intensities) # noise_2
labels[str(name) + '_noise2_spike'] = labels[str(name)+'_spike']
labels[str(name) + '_noise1_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_noise1_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_noise1_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_noise1_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_noise1_noise'] = max(1/num_intensities, (noise_value - 4)/num_intensities) # Bad quality image --> noise_1
labels[str(name) + '_noise1_spike'] = labels[str(name)+'_spike']
# Augmented spike images
labels[str(name) + '_spike4_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_spike4_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_spike4_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_spike4_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_spike4_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_spike4_spike'] = max(1/num_intensities, (spike_value - 1)/num_intensities) # Good quality image --> spike_4
labels[str(name) + '_spike3_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_spike3_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_spike3_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_spike3_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_spike3_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_spike3_spike'] = max(1/num_intensities, (spike_value - 2)/num_intensities) # spike_3
labels[str(name) + '_spike2_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_spike2_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_spike2_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_spike2_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_spike2_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_spike2_spike'] = max(1/num_intensities, (spike_value - 3)/num_intensities) # spike_2
labels[str(name) + '_spike1_blur'] = labels[str(name)+'_blur']
labels[str(name) + '_spike1_resolution'] = labels[str(name)+'_resolution']
labels[str(name) + '_spike1_ghosting'] = labels[str(name)+'_ghosting']
labels[str(name) + '_spike1_motion'] = labels[str(name)+'_motion']
labels[str(name) + '_spike1_noise'] = labels[str(name)+'_noise']
labels[str(name) + '_spike1_spike'] = max(1/num_intensities, (spike_value - 4)/num_intensities) # Bad quality image --> spike_1
# Save labels
print("Saving generated labels..")
if not os.path.isdir(target_path):
os.makedirs(target_path)
with open(os.path.join(target_path, 'labels.json'), 'w') as fp:
json.dump(labels, fp, sort_keys=True, indent=4)
# Transform labels in such a way: k:v --> v_artefact:[k] if desired
if swap_labels:
labels_swapped = dict()
augmentationT = ['blur', 'noise', 'ghosting', 'spike', 'resolution', 'motion']
intensities = [1/num_intensities, 2/num_intensities, 3/num_intensities, 4/num_intensities, 5/num_intensities]
# Loop through labels and change k:v to v_artefact:[k]
for k, v in labels.items():
intensity = str(v)
augmentation = ''.join([i for i in str(k.split('_')[-1]) if not i.isdigit()])
key = str(intensity+'_'+augmentation)
if key == '1.0_': # Decathlon Data with not augmentation --> perfekt in all augmentations
for a in augmentationT:
a_key = key+str(a)
if a_key in labels_swapped:
v_list = labels_swapped[a_key]
v_list.append(k)
labels_swapped[a_key] = v_list
else:
labels_swapped[a_key] = [k]
elif key in labels_swapped:
v_list = labels_swapped[key]
v_list.append(k)
labels_swapped[key] = v_list
else:
labels_swapped[key] = [k]
# Add all missing v_artefacts with empty lists --> v_artefact:[]
for i in intensities:
for a in augmentationT:
key = str(i)+'_'+str(a)
if key not in labels_swapped:
labels_swapped[key] = list()
# Save labels
print("Saving swapped labels..")
with open(os.path.join(target_path, 'labels_swapped.json'), 'w') as fp:
json.dump(labels_swapped, fp, sort_keys=True, indent=4)
def generate_test_labels(num_intensities, source_path, target_path):
r"""This function generates the labels.json file that is necessary for testing on an unseen dataset."""
# Foldernames are patient_id
filenames = [x for x in os.listdir(source_path) if 'DS_Store' not in x and 'DecathlonLung' in x\
and not 'blur' in x and not 'resolution' in x and not 'ghosting' in x and not 'motion' in x\
and not 'noise' in x and not 'spike' in x]
# Generate labels for Decathlon with augmentation
labels = dict()
# Add MosMed labels (defined by hand --> do not delete) to labels dict
labels['Mosmed_0001'+'_blur'] = 4/num_intensities
labels['Mosmed_0001'+'_resolution'] = 5/num_intensities
labels['Mosmed_0001'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0001'+'_motion'] = 5/num_intensities
labels['Mosmed_0001'+'_noise'] = 5/num_intensities
labels['Mosmed_0001'+'_spike'] = 5/num_intensities
labels['Mosmed_0002'+'_blur'] = 5/num_intensities
labels['Mosmed_0002'+'_resolution'] = 5/num_intensities
labels['Mosmed_0002'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0002'+'_motion'] = 5/num_intensities
labels['Mosmed_0002'+'_noise'] = 5/num_intensities
labels['Mosmed_0002'+'_spike'] = 4/num_intensities
labels['Mosmed_0003'+'_blur'] = 5/num_intensities
labels['Mosmed_0003'+'_resolution'] = 5/num_intensities
labels['Mosmed_0003'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0003'+'_motion'] = 5/num_intensities
labels['Mosmed_0003'+'_noise'] = 5/num_intensities
labels['Mosmed_0003'+'_spike'] = 5/num_intensities
labels['Mosmed_0004'+'_blur'] = 4/num_intensities
labels['Mosmed_0004'+'_resolution'] = 5/num_intensities
labels['Mosmed_0004'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0004'+'_motion'] = 5/num_intensities
labels['Mosmed_0004'+'_noise'] = 5/num_intensities
labels['Mosmed_0004'+'_spike'] = 4/num_intensities
labels['Mosmed_0005'+'_blur'] = 5/num_intensities
labels['Mosmed_0005'+'_resolution'] = 5/num_intensities
labels['Mosmed_0005'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0005'+'_motion'] = 5/num_intensities
labels['Mosmed_0005'+'_noise'] = 5/num_intensities
labels['Mosmed_0005'+'_spike'] = 5/num_intensities
labels['Mosmed_0006'+'_blur'] = 5/num_intensities
labels['Mosmed_0006'+'_resolution'] = 5/num_intensities
labels['Mosmed_0006'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0006'+'_motion'] = 5/num_intensities
labels['Mosmed_0006'+'_noise'] = 5/num_intensities
labels['Mosmed_0006'+'_spike'] = 5/num_intensities
labels['Mosmed_0007'+'_blur'] = 5/num_intensities
labels['Mosmed_0007'+'_resolution'] = 5/num_intensities
labels['Mosmed_0007'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0007'+'_motion'] = 5/num_intensities
labels['Mosmed_0007'+'_noise'] = 5/num_intensities
labels['Mosmed_0007'+'_spike'] = 5/num_intensities
labels['Mosmed_0008'+'_blur'] = 4/num_intensities
labels['Mosmed_0008'+'_resolution'] = 5/num_intensities
labels['Mosmed_0008'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0008'+'_motion'] = 5/num_intensities
labels['Mosmed_0008'+'_noise'] = 5/num_intensities
labels['Mosmed_0008'+'_spike'] = 4/num_intensities
labels['Mosmed_0009'+'_blur'] = 5/num_intensities
labels['Mosmed_0009'+'_resolution'] = 5/num_intensities
labels['Mosmed_0009'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0009'+'_motion'] = 5/num_intensities
labels['Mosmed_0009'+'_noise'] = 5/num_intensities
labels['Mosmed_0009'+'_spike'] = 4/num_intensities
labels['Mosmed_0010'+'_blur'] = 5/num_intensities
labels['Mosmed_0010'+'_resolution'] = 5/num_intensities
labels['Mosmed_0010'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0010'+'_motion'] = 5/num_intensities
labels['Mosmed_0010'+'_noise'] = 5/num_intensities
labels['Mosmed_0010'+'_spike'] = 5/num_intensities
labels['Mosmed_0011'+'_blur'] = 4/num_intensities
labels['Mosmed_0011'+'_resolution'] = 5/num_intensities
labels['Mosmed_0011'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0011'+'_motion'] = 5/num_intensities
labels['Mosmed_0011'+'_noise'] = 5/num_intensities
labels['Mosmed_0011'+'_spike'] = 5/num_intensities
labels['Mosmed_0012'+'_blur'] = 3/num_intensities
labels['Mosmed_0012'+'_resolution'] = 5/num_intensities
labels['Mosmed_0012'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0012'+'_motion'] = 5/num_intensities
labels['Mosmed_0012'+'_noise'] = 5/num_intensities
labels['Mosmed_0012'+'_spike'] = 4/num_intensities
labels['Mosmed_0013'+'_blur'] = 5/num_intensities
labels['Mosmed_0013'+'_resolution'] = 5/num_intensities
labels['Mosmed_0013'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0013'+'_motion'] = 5/num_intensities
labels['Mosmed_0013'+'_noise'] = 5/num_intensities
labels['Mosmed_0013'+'_spike'] = 5/num_intensities
labels['Mosmed_0014'+'_blur'] = 5/num_intensities
labels['Mosmed_0014'+'_resolution'] = 5/num_intensities
labels['Mosmed_0014'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0014'+'_motion'] = 5/num_intensities
labels['Mosmed_0014'+'_noise'] = 5/num_intensities
labels['Mosmed_0014'+'_spike'] = 5/num_intensities
labels['Mosmed_0015'+'_blur'] = 5/num_intensities
labels['Mosmed_0015'+'_resolution'] = 5/num_intensities
labels['Mosmed_0015'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0015'+'_motion'] = 5/num_intensities
labels['Mosmed_0015'+'_noise'] = 5/num_intensities
labels['Mosmed_0015'+'_spike'] = 5/num_intensities
labels['Mosmed_0016'+'_blur'] = 5/num_intensities
labels['Mosmed_0016'+'_resolution'] = 5/num_intensities
labels['Mosmed_0016'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0016'+'_motion'] = 5/num_intensities
labels['Mosmed_0016'+'_noise'] = 5/num_intensities
labels['Mosmed_0016'+'_spike'] = 5/num_intensities
labels['Mosmed_0017'+'_blur'] = 5/num_intensities
labels['Mosmed_0017'+'_resolution'] = 5/num_intensities
labels['Mosmed_0017'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0017'+'_motion'] = 5/num_intensities
labels['Mosmed_0017'+'_noise'] = 5/num_intensities
labels['Mosmed_0017'+'_spike'] = 4/num_intensities
labels['Mosmed_0018'+'_blur'] = 5/num_intensities
labels['Mosmed_0018'+'_resolution'] = 5/num_intensities
labels['Mosmed_0018'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0018'+'_motion'] = 5/num_intensities
labels['Mosmed_0018'+'_noise'] = 5/num_intensities
labels['Mosmed_0018'+'_spike'] = 5/num_intensities
labels['Mosmed_0019'+'_blur'] = 5/num_intensities
labels['Mosmed_0019'+'_resolution'] = 5/num_intensities
labels['Mosmed_0019'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0019'+'_motion'] = 5/num_intensities
labels['Mosmed_0019'+'_noise'] = 5/num_intensities
labels['Mosmed_0019'+'_spike'] = 5/num_intensities
labels['Mosmed_0020'+'_blur'] = 5/num_intensities
labels['Mosmed_0020'+'_resolution'] = 5/num_intensities
labels['Mosmed_0020'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0020'+'_motion'] = 5/num_intensities
labels['Mosmed_0020'+'_noise'] = 5/num_intensities
labels['Mosmed_0020'+'_spike'] = 5/num_intensities
labels['Mosmed_0021'+'_blur'] = 5/num_intensities
labels['Mosmed_0021'+'_resolution'] = 5/num_intensities
labels['Mosmed_0021'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0021'+'_motion'] = 5/num_intensities
labels['Mosmed_0021'+'_noise'] = 5/num_intensities
labels['Mosmed_0021'+'_spike'] = 4/num_intensities
labels['Mosmed_0022'+'_blur'] = 5/num_intensities
labels['Mosmed_0022'+'_resolution'] = 5/num_intensities
labels['Mosmed_0022'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0022'+'_motion'] = 5/num_intensities
labels['Mosmed_0022'+'_noise'] = 5/num_intensities
labels['Mosmed_0022'+'_spike'] = 5/num_intensities
labels['Mosmed_0023'+'_blur'] = 5/num_intensities
labels['Mosmed_0023'+'_resolution'] = 5/num_intensities
labels['Mosmed_0023'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0023'+'_motion'] = 5/num_intensities
labels['Mosmed_0023'+'_noise'] = 5/num_intensities
labels['Mosmed_0023'+'_spike'] = 5/num_intensities
labels['Mosmed_0024'+'_blur'] = 5/num_intensities
labels['Mosmed_0024'+'_resolution'] = 5/num_intensities
labels['Mosmed_0024'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0024'+'_motion'] = 5/num_intensities
labels['Mosmed_0024'+'_noise'] = 5/num_intensities
labels['Mosmed_0024'+'_spike'] = 5/num_intensities
labels['Mosmed_0025'+'_blur'] = 5/num_intensities
labels['Mosmed_0025'+'_resolution'] = 5/num_intensities
labels['Mosmed_0025'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0025'+'_motion'] = 5/num_intensities
labels['Mosmed_0025'+'_noise'] = 5/num_intensities
labels['Mosmed_0025'+'_spike'] = 5/num_intensities
labels['Mosmed_0026'+'_blur'] = 5/num_intensities
labels['Mosmed_0026'+'_resolution'] = 5/num_intensities
labels['Mosmed_0026'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0026'+'_motion'] = 5/num_intensities
labels['Mosmed_0026'+'_noise'] = 5/num_intensities
labels['Mosmed_0026'+'_spike'] = 5/num_intensities
labels['Mosmed_0027'+'_blur'] = 5/num_intensities
labels['Mosmed_0027'+'_resolution'] = 5/num_intensities
labels['Mosmed_0027'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0027'+'_motion'] = 5/num_intensities
labels['Mosmed_0027'+'_noise'] = 5/num_intensities
labels['Mosmed_0027'+'_spike'] = 4/num_intensities
labels['Mosmed_0028'+'_blur'] = 5/num_intensities
labels['Mosmed_0028'+'_resolution'] = 5/num_intensities
labels['Mosmed_0028'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0028'+'_motion'] = 5/num_intensities
labels['Mosmed_0028'+'_noise'] = 5/num_intensities
labels['Mosmed_0028'+'_spike'] = 5/num_intensities
labels['Mosmed_0029'+'_blur'] = 5/num_intensities
labels['Mosmed_0029'+'_resolution'] = 5/num_intensities
labels['Mosmed_0029'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0029'+'_motion'] = 5/num_intensities
labels['Mosmed_0029'+'_noise'] = 5/num_intensities
labels['Mosmed_0029'+'_spike'] = 5/num_intensities
labels['Mosmed_0030'+'_blur'] = 5/num_intensities
labels['Mosmed_0030'+'_resolution'] = 5/num_intensities
labels['Mosmed_0030'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0030'+'_motion'] = 5/num_intensities
labels['Mosmed_0030'+'_noise'] = 5/num_intensities
labels['Mosmed_0030'+'_spike'] = 5/num_intensities
labels['Mosmed_0031'+'_blur'] = 5/num_intensities
labels['Mosmed_0031'+'_resolution'] = 5/num_intensities
labels['Mosmed_0031'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0031'+'_motion'] = 5/num_intensities
labels['Mosmed_0031'+'_noise'] = 5/num_intensities
labels['Mosmed_0031'+'_spike'] = 5/num_intensities
labels['Mosmed_0032'+'_blur'] = 5/num_intensities
labels['Mosmed_0032'+'_resolution'] = 5/num_intensities
labels['Mosmed_0032'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0032'+'_motion'] = 5/num_intensities
labels['Mosmed_0032'+'_noise'] = 5/num_intensities
labels['Mosmed_0032'+'_spike'] = 5/num_intensities
labels['Mosmed_0033'+'_blur'] = 5/num_intensities
labels['Mosmed_0033'+'_resolution'] = 5/num_intensities
labels['Mosmed_0033'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0033'+'_motion'] = 5/num_intensities
labels['Mosmed_0033'+'_noise'] = 5/num_intensities
labels['Mosmed_0033'+'_spike'] = 4/num_intensities
labels['Mosmed_0034'+'_blur'] = 5/num_intensities
labels['Mosmed_0034'+'_resolution'] = 5/num_intensities
labels['Mosmed_0034'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0034'+'_motion'] = 5/num_intensities
labels['Mosmed_0034'+'_noise'] = 5/num_intensities
labels['Mosmed_0034'+'_spike'] = 5/num_intensities
labels['Mosmed_0035'+'_blur'] = 5/num_intensities
labels['Mosmed_0035'+'_resolution'] = 5/num_intensities
labels['Mosmed_0035'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0035'+'_motion'] = 5/num_intensities
labels['Mosmed_0035'+'_noise'] = 5/num_intensities
labels['Mosmed_0035'+'_spike'] = 5/num_intensities
labels['Mosmed_0036'+'_blur'] = 5/num_intensities
labels['Mosmed_0036'+'_resolution'] = 5/num_intensities
labels['Mosmed_0036'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0036'+'_motion'] = 5/num_intensities
labels['Mosmed_0036'+'_noise'] = 5/num_intensities
labels['Mosmed_0036'+'_spike'] = 5/num_intensities
labels['Mosmed_0037'+'_blur'] = 5/num_intensities
labels['Mosmed_0037'+'_resolution'] = 5/num_intensities
labels['Mosmed_0037'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0037'+'_motion'] = 5/num_intensities
labels['Mosmed_0037'+'_noise'] = 5/num_intensities
labels['Mosmed_0037'+'_spike'] = 5/num_intensities
labels['Mosmed_0038'+'_blur'] = 5/num_intensities
labels['Mosmed_0038'+'_resolution'] = 5/num_intensities
labels['Mosmed_0038'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0038'+'_motion'] = 5/num_intensities
labels['Mosmed_0038'+'_noise'] = 5/num_intensities
labels['Mosmed_0038'+'_spike'] = 5/num_intensities
labels['Mosmed_0039'+'_blur'] = 5/num_intensities
labels['Mosmed_0039'+'_resolution'] = 5/num_intensities
labels['Mosmed_0039'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0039'+'_motion'] = 5/num_intensities
labels['Mosmed_0039'+'_noise'] = 5/num_intensities
labels['Mosmed_0039'+'_spike'] = 5/num_intensities
labels['Mosmed_0040'+'_blur'] = 5/num_intensities
labels['Mosmed_0040'+'_resolution'] = 5/num_intensities
labels['Mosmed_0040'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0040'+'_motion'] = 5/num_intensities
labels['Mosmed_0040'+'_noise'] = 5/num_intensities
labels['Mosmed_0040'+'_spike'] = 5/num_intensities
labels['Mosmed_0041'+'_blur'] = 5/num_intensities
labels['Mosmed_0041'+'_resolution'] = 5/num_intensities
labels['Mosmed_0041'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0041'+'_motion'] = 5/num_intensities
labels['Mosmed_0041'+'_noise'] = 5/num_intensities
labels['Mosmed_0041'+'_spike'] = 5/num_intensities
labels['Mosmed_0042'+'_blur'] = 4/num_intensities
labels['Mosmed_0042'+'_resolution'] = 5/num_intensities
labels['Mosmed_0042'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0042'+'_motion'] = 5/num_intensities
labels['Mosmed_0042'+'_noise'] = 5/num_intensities
labels['Mosmed_0042'+'_spike'] = 4/num_intensities
labels['Mosmed_0043'+'_blur'] = 4/num_intensities
labels['Mosmed_0043'+'_resolution'] = 5/num_intensities
labels['Mosmed_0043'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0043'+'_motion'] = 5/num_intensities
labels['Mosmed_0043'+'_noise'] = 5/num_intensities
labels['Mosmed_0043'+'_spike'] = 5/num_intensities
labels['Mosmed_0044'+'_blur'] = 4/num_intensities
labels['Mosmed_0044'+'_resolution'] = 5/num_intensities
labels['Mosmed_0044'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0044'+'_motion'] = 5/num_intensities
labels['Mosmed_0044'+'_noise'] = 5/num_intensities
labels['Mosmed_0044'+'_spike'] = 4/num_intensities
labels['Mosmed_0045'+'_blur'] = 4/num_intensities
labels['Mosmed_0045'+'_resolution'] = 5/num_intensities
labels['Mosmed_0045'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0045'+'_motion'] = 5/num_intensities
labels['Mosmed_0045'+'_noise'] = 5/num_intensities
labels['Mosmed_0045'+'_spike'] = 4/num_intensities
labels['Mosmed_0046'+'_blur'] = 4/num_intensities
labels['Mosmed_0046'+'_resolution'] = 5/num_intensities
labels['Mosmed_0046'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0046'+'_motion'] = 5/num_intensities
labels['Mosmed_0046'+'_noise'] = 5/num_intensities
labels['Mosmed_0046'+'_spike'] = 5/num_intensities
labels['Mosmed_0047'+'_blur'] = 5/num_intensities
labels['Mosmed_0047'+'_resolution'] = 5/num_intensities
labels['Mosmed_0047'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0047'+'_motion'] = 5/num_intensities
labels['Mosmed_0047'+'_noise'] = 5/num_intensities
labels['Mosmed_0047'+'_spike'] = 5/num_intensities
labels['Mosmed_0048'+'_blur'] = 4/num_intensities
labels['Mosmed_0048'+'_resolution'] = 5/num_intensities
labels['Mosmed_0048'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0048'+'_motion'] = 5/num_intensities
labels['Mosmed_0048'+'_noise'] = 5/num_intensities
labels['Mosmed_0048'+'_spike'] = 5/num_intensities
labels['Mosmed_0049'+'_blur'] = 5/num_intensities
labels['Mosmed_0049'+'_resolution'] = 5/num_intensities
labels['Mosmed_0049'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0049'+'_motion'] = 5/num_intensities
labels['Mosmed_0049'+'_noise'] = 5/num_intensities
labels['Mosmed_0049'+'_spike'] = 5/num_intensities
labels['Mosmed_0050'+'_blur'] = 4/num_intensities
labels['Mosmed_0050'+'_resolution'] = 5/num_intensities
labels['Mosmed_0050'+'_ghosting'] = 5/num_intensities
labels['Mosmed_0050'+'_motion'] = 5/num_intensities
labels['Mosmed_0050'+'_noise'] = 5/num_intensities
labels['Mosmed_0050'+'_spike'] = 4/num_intensities
labels['Radiopedia_0001'+'_blur'] = 4/num_intensities
labels['Radiopedia_0001'+'_resolution'] = 5/num_intensities
labels['Radiopedia_0001'+'_ghosting'] = 5/num_intensities
labels['Radiopedia_0001'+'_motion'] = 5/num_intensities
labels['Radiopedia_0001'+'_noise'] = 4/num_intensities
labels['Radiopedia_0001'+'_spike'] = 5/num_intensities
labels['Radiopedia_0002'+'_blur'] = 4/num_intensities
labels['Radiopedia_0002'+'_resolution'] = 5/num_intensities
labels['Radiopedia_0002'+'_ghosting'] = 5/num_intensities
labels['Radiopedia_0002'+'_motion'] = 5/num_intensities
labels['Radiopedia_0002'+'_noise'] = 5/num_intensities
labels['Radiopedia_0002'+'_spike'] = 5/num_intensities
labels['Radiopedia_0003'+'_blur'] = 4/num_intensities
labels['Radiopedia_0003'+'_resolution'] = 5/num_intensities
labels['Radiopedia_0003'+'_ghosting'] = 5/num_intensities
# --- Radiopedia quality ratings ---------------------------------------
# One normalised rating (value / num_intensities) per artefact type and
# volume. A rating of 5 means no visible artefact; lower values indicate
# stronger degradation. Only deviations from 5 are spelled out below.
#
# Radiopedia_0003: remaining artefact ratings (its first entries are
# assigned further up in this file).
for _art, _val in (('motion', 5), ('noise', 5), ('spike', 5)):
    labels['Radiopedia_0003' + '_' + _art] = _val / num_intensities
# Per-subject overrides for Radiopedia_0004 .. Radiopedia_0020; any
# artefact not listed for a subject defaults to a rating of 5.
_radiopedia_overrides = {
    'Radiopedia_0004': {'blur': 4},
    'Radiopedia_0005': {'noise': 4},
    'Radiopedia_0006': {'blur': 4, 'noise': 4},
    'Radiopedia_0007': {'blur': 4, 'noise': 4, 'spike': 3},
    'Radiopedia_0008': {'blur': 4, 'noise': 4},
    'Radiopedia_0009': {'blur': 4},
    'Radiopedia_0010': {'blur': 3, 'noise': 4},
    'Radiopedia_0011': {},
    'Radiopedia_0012': {},
    'Radiopedia_0013': {'noise': 4},
    'Radiopedia_0014': {},
    'Radiopedia_0015': {},
    'Radiopedia_0016': {'noise': 4},
    'Radiopedia_0017': {'noise': 4},
    'Radiopedia_0018': {'noise': 4},
    'Radiopedia_0019': {'noise': 4},
    'Radiopedia_0020': {'noise': 4},
}
# Dicts iterate in insertion order (Python 3.7+), so `labels` receives
# its keys in exactly the same order as the original hand-written list.
for _subject, _deviations in _radiopedia_overrides.items():
    for _art in ('blur', 'resolution', 'ghosting', 'motion', 'noise', 'spike'):
        labels[_subject + '_' + _art] = _deviations.get(_art, 5) / num_intensities
# Add GC labels (defined by hand --> do not delete) to labels dict
# --> for uncropped images!
# One normalised rating (value / num_intensities) per artefact type and
# volume; 5 means no visible artefact. Only deviations from the default
# rating of 5 are listed in the per-subject override map below.
_gc_prefix = 'GC_Corona_volume-covid19-A-'
_gc_overrides = {
    '0003': {},
    '0011': {},
    '0013': {'blur': 4, 'resolution': 4, 'ghosting': 4},
    '0014': {},
    '0016': {},
    '0025': {'blur': 3, 'motion': 3},
    '0031': {'blur': 4, 'motion': 2},
    '0034': {'blur': 3, 'motion': 3},
    '0038': {'blur': 4, 'motion': 4},
    '0039': {'blur': 4, 'motion': 4},
    '0041': {},
    '0044': {'blur': 3, 'motion': 4},
    '0046': {'blur': 3, 'motion': 4},
    '0047_1': {'blur': 4, 'motion': 4},
    '0053': {'blur': 3, 'motion': 4},
    '0054': {'blur': 3, 'motion': 4},
    '0066': {'blur': 3},
    '0070': {'blur': 3, 'motion': 4},
    '0072': {'blur': 4, 'motion': 4},
    '0073': {'blur': 4, 'motion': 4},
    '0074_1': {'blur': 3},
    '0083': {'blur': 4, 'motion': 4},
    '0090': {'blur': 3, 'motion': 4},
    '0092': {'blur': 4, 'motion': 4},
    '0096': {},
    '0106': {'blur': 4, 'motion': 3},
    '0110': {'blur': 4, 'motion': 4},
    '0112': {'blur': 4, 'spike': 4},
    '0114': {'blur': 3, 'motion': 3},
    '0120': {'blur': 4},
    '0129': {'blur': 3, 'motion': 3},
    '0130': {'blur': 3, 'motion': 4},
    '0133': {'blur': 3, 'motion': 4},
    '0147': {'blur': 3, 'motion': 4},
    '0151': {'blur': 4, 'motion': 4},
    '0154': {'blur': 4},
    '0165': {},
    '0167_1': {'motion': 4},
    '0173': {'blur': 4},
    '0178': {},
    '0179': {'blur': 3, 'motion': 4},
    '0181': {'blur': 4},
    '0187': {'blur': 3, 'motion': 4},
    '0196_0': {'blur': 4},
    '0199': {'blur': 3},
    '0201': {'blur': 4},
    '0202_0': {'blur': 4},
    '0214': {'blur': 3, 'motion': 4},
    '0215': {'blur': 3, 'motion': 4},
    '0228': {'blur': 3, 'motion': 4},
    '0233': {'blur': 4},
    '0236': {'blur': 2, 'motion': 3},
    '0237': {'blur': 3, 'motion': 4},
    '0239': {'blur': 4},
    '0246': {'blur': 4},
    '0247': {'blur': 3, 'motion': 4},
    '0251': {'blur': 4},
    '0252': {'blur': 4},
    '0255': {},
    '0256_1': {'blur': 4},
    '0263': {'blur': 2, 'motion': 4},
    '0264': {},
    '0267': {'blur': 4},
    '0270': {'blur': 3, 'motion': 4},
    '0282': {'blur': 4},
    '0285': {'blur': 4},
    '0288': {'blur': 3},
    '0295': {'blur': 2, 'motion': 3},
    '0296': {'blur': 4},
}
# Dicts iterate in insertion order (Python 3.7+), so `labels` receives
# its keys in exactly the same order as the original hand-written list.
for _gc_id, _deviations in _gc_overrides.items():
    for _art in ('blur', 'resolution', 'ghosting', 'motion', 'noise', 'spike'):
        labels[_gc_prefix + _gc_id + '_' + _art] = _deviations.get(_art, 5) / num_intensities
# A-0299: only its blur rating belongs here (its remaining artefact
# ratings are assigned further down in this file).
labels[_gc_prefix + '0299' + '_blur'] = 3 / num_intensities
labels['GC_Corona_volume-covid19-A-0299'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0299'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0299'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0299'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0299'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0301'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0301'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0301'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0301'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0301'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0301'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0307'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0307'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0307'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0307'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0307'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0307'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0313'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0313'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0313'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0313'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0313'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0313'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0314'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0314'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0314'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0314'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0314'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0314'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0315'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0315'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0315'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0315'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0315'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0315'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0316'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0316'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0316'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0316'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0316'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0316'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0320'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0320'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0320'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0320'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0320'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0320'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0323'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0323'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0323'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0323'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0323'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0323'+'_spike'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0329'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0329'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0329'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0329'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0329'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0329'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0331'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0331'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0331'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0331'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0331'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0331'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0332'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0332'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0332'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0332'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0332'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0332'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0338'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0338'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0338'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0338'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0338'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0338'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0339'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0339'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0339'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0339'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0339'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0339'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0342'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0342'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0342'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0342'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0342'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0342'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0347'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0347'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0347'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0347'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0347'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0347'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0351'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0351'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0351'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0351'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0351'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0351'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0354'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0354'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0354'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0354'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0354'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0354'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0355'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0355'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0355'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0355'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0355'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0355'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0360'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0360'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0360'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0360'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0360'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0360'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0361'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0361'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0361'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0361'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0361'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0361'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0366'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0366'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0366'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0366'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0366'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0366'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0372'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0372'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0372'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0372'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0372'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0372'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0377'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0377'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0377'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0377'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0377'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0377'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0380'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0380'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0380'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0380'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0380'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0380'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0382'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0382'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0382'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0382'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0382'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0382'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0383_1'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0383_1'+'_resolution'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0383_1'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0383_1'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0383_1'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0383_1'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0386'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0386'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0386'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0386'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0386'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0386'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0387'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0387'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0387'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0387'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0387'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0387'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0388'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0388'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0388'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0388'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0388'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0388'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0391'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0391'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0391'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0391'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0391'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0391'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0392'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0392'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0392'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0392'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0392'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0392'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0394'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0394'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0394'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0394'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0394'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0394'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0397'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0397'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0397'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0397'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0397'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0397'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0400'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0400'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0400'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0400'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0400'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0400'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0402'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0402'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0402'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0402'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0402'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0402'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0407'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0407'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0407'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0407'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0407'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0407'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0413'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0413'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0413'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0413'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0413'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0413'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0414'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0414'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0414'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0414'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0414'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0414'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0416'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0416'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0416'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0416'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0416'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0416'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0417'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0417'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0417'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0417'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0417'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0417'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0418'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0418'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0418'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0418'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0418'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0418'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0421'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0421'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0421'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0421'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0421'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0421'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0422'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0422'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0422'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0422'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0422'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0422'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0423'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0423'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0423'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0423'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0423'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0423'+'_spike'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0435'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0435'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0435'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0435'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0435'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0435'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0437'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0437'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0437'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0437'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0437'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0437'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0443'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0443'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0443'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0443'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0443'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0443'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0445'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0445'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0445'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0445'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0445'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0445'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0455'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0455'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0455'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0455'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0455'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0455'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0462'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0462'+'_resolution'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0462'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0462'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0462'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0462'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0463'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0463'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0463'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0463'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0463'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0463'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0464'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0464'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0464'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0464'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0464'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0464'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0472'+'_blur'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0472'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0472'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0472'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0472'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0472'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0473'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0473'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0473'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0473'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0473'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0473'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0475'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0476'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0479'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0483'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0494'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0495'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_blur'] = 3/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_motion'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0498'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0500'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_motion'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_noise'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0502'+'_spike'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_blur'] = 4/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_resolution'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_ghosting'] = 5/num_intensities
labels['GC_Corona_volume-covid19-A-0504'+'_motion'] = 4/num_intensities
# Quality labels for the GC_Corona covid19-A volumes, one entry per
# (volume, artifact) pair. Raw scores are on a 1-5 scale and are normalized
# by dividing by num_intensities (defined earlier in this script).
# NOTE(review): the blur/resolution/ghosting/motion scores for volume 0504
# are assigned earlier in the file; only its last two artifacts appear here.
labels['GC_Corona_volume-covid19-A-0504' + '_noise'] = 5 / num_intensities
labels['GC_Corona_volume-covid19-A-0504' + '_spike'] = 5 / num_intensities

# Per-volume raw scores, listed in the fixed artifact order
# (blur, resolution, ghosting, motion, noise, spike).
_ARTIFACTS = ('blur', 'resolution', 'ghosting', 'motion', 'noise', 'spike')
_VOLUME_SCORES = {
    '0511': (3, 5, 5, 4, 5, 5),
    '0521': (4, 5, 5, 5, 5, 5),
    '0522': (4, 5, 5, 5, 5, 5),
    '0524': (4, 5, 5, 5, 5, 5),
    '0525': (4, 5, 5, 4, 5, 5),
    '0526': (4, 5, 5, 5, 5, 5),
    '0530': (3, 5, 5, 4, 5, 5),
    '0531': (4, 5, 5, 5, 5, 5),
    '0534': (3, 5, 5, 5, 5, 5),
    '0537': (4, 5, 5, 5, 5, 5),
    '0548': (4, 5, 5, 5, 5, 5),
    '0553': (4, 5, 5, 5, 5, 5),
    '0557': (3, 5, 5, 4, 5, 5),
    '0559': (4, 5, 5, 5, 5, 5),
    '0560': (4, 5, 5, 5, 5, 5),
    '0562': (4, 5, 5, 5, 5, 5),
    '0567': (4, 5, 5, 5, 5, 5),
    '0569': (3, 5, 5, 4, 5, 5),
    '0570': (4, 5, 5, 5, 5, 5),
    '0573': (3, 5, 5, 5, 5, 5),
    '0575': (4, 5, 5, 5, 5, 5),
    '0576': (4, 5, 5, 4, 5, 5),
    '0579': (5, 5, 5, 5, 5, 5),
    '0581': (3, 5, 5, 4, 5, 5),
    '0585': (4, 5, 5, 4, 5, 5),
    '0586': (3, 5, 5, 4, 5, 5),
    '0589': (4, 5, 5, 5, 5, 5),
    '0590': (4, 5, 5, 5, 5, 5),
    '0599': (3, 5, 5, 4, 5, 5),
    '0600': (3, 5, 5, 5, 5, 5),
    '0604': (4, 5, 5, 5, 5, 4),
    '0612': (3, 5, 5, 5, 5, 5),
    '0614': (4, 5, 5, 4, 5, 5),
    '0623': (4, 5, 5, 5, 5, 5),
    '0626': (3, 5, 5, 5, 5, 5),
    '0627': (3, 5, 5, 4, 5, 5),
    '0629': (4, 5, 5, 5, 5, 5),
    '0635': (3, 5, 5, 5, 5, 5),
    '0636': (4, 5, 5, 5, 5, 5),
    '0638': (3, 5, 5, 4, 5, 5),
    '0643': (4, 5, 5, 5, 5, 5),
    '0648': (3, 5, 5, 4, 5, 5),
    '0652': (4, 5, 5, 5, 5, 5),
    '0656': (4, 5, 5, 4, 5, 5),
    '0657': (4, 5, 5, 5, 5, 5),
    '0658': (4, 3, 5, 4, 5, 5),
    '0659': (3, 5, 5, 4, 5, 5),
    '0660': (4, 5, 5, 5, 5, 5),
    '0665': (4, 5, 5, 5, 5, 4),
    '0666': (5, 5, 5, 5, 5, 5),
    '0669': (3, 5, 5, 4, 5, 5),
    '0670': (3, 5, 5, 4, 5, 5),
    '0678': (4, 5, 5, 5, 5, 5),
    '0685': (4, 5, 5, 5, 5, 5),
    '0686': (4, 5, 5, 5, 5, 5),
    '0694': (4, 5, 5, 5, 5, 5),
    '0696': (4, 4, 5, 5, 5, 5),
    '0698': (3, 5, 5, 4, 5, 5),
}
# Expand the table into the same flat entries the hand-written assignments
# produced: labels['GC_Corona_volume-covid19-A-<id>_<artifact>'] = score / num_intensities.
for _vol, _scores in _VOLUME_SCORES.items():
    _key_prefix = 'GC_Corona_volume-covid19-A-' + _vol
    for _artifact, _score in zip(_ARTIFACTS, _scores):
        labels[_key_prefix + '_' + _artifact] = _score / num_intensities
# Persist the generated labels dict as pretty-printed JSON so downstream
# code can reload the per-volume quality scores without regenerating them.
print("Saving generated labels..")
# exist_ok=True replaces the racy isdir()-then-makedirs() check; it still
# raises FileExistsError if target_path exists but is not a directory.
os.makedirs(target_path, exist_ok=True)
with open(os.path.join(target_path, 'labels.json'), 'w') as fp:
    json.dump(labels, fp, sort_keys=True, indent=4)
| 72.846256
| 158
| 0.738336
| 30,691
| 218,903
| 4.901176
| 0.012707
| 0.261531
| 0.364973
| 0.306338
| 0.978667
| 0.977257
| 0.864907
| 0.855925
| 0.825571
| 0.825571
| 0
| 0.094846
| 0.104718
| 218,903
| 3,005
| 159
| 72.846256
| 0.672691
| 0.007305
| 0
| 0.786752
| 1
| 0
| 0.46202
| 0.330737
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000676
| false
| 0
| 0.000676
| 0
| 0.001352
| 0.001014
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
5534be5203045ba0c600bc733277d92909861053
| 207
|
py
|
Python
|
deepethogram/__init__.py
|
monajalal/deepethogram
|
58cfa6843ef4a384ea7ec8c9786edb1ee7111b5f
|
[
"FSFAP"
] | null | null | null |
deepethogram/__init__.py
|
monajalal/deepethogram
|
58cfa6843ef4a384ea7ec8c9786edb1ee7111b5f
|
[
"FSFAP"
] | null | null | null |
deepethogram/__init__.py
|
monajalal/deepethogram
|
58cfa6843ef4a384ea7ec8c9786edb1ee7111b5f
|
[
"FSFAP"
] | null | null | null |
# from deepethogram import feature_extractor, flow_generator, gui, sequence, dataloaders, metrics, utils, viz, zscore
# from deepethogram import feature_extractor, flow_generator, gui, sequence, dataloaders,
| 103.5
| 117
| 0.826087
| 24
| 207
| 6.958333
| 0.583333
| 0.191617
| 0.263473
| 0.347305
| 0.874252
| 0.874252
| 0.874252
| 0.874252
| 0.874252
| 0.874252
| 0
| 0
| 0.101449
| 207
| 2
| 118
| 103.5
| 0.897849
| 0.980676
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
555d8ce24d6c1180d85b34c70799ad0c7a382306
| 2,020
|
py
|
Python
|
hw1/performance_metrics.py
|
brawnerquan/comp135-20f-assignments
|
9570c17b872b7334b0e5b86160d868e3b854c71a
|
[
"MIT"
] | null | null | null |
hw1/performance_metrics.py
|
brawnerquan/comp135-20f-assignments
|
9570c17b872b7334b0e5b86160d868e3b854c71a
|
[
"MIT"
] | null | null | null |
hw1/performance_metrics.py
|
brawnerquan/comp135-20f-assignments
|
9570c17b872b7334b0e5b86160d868e3b854c71a
|
[
"MIT"
] | null | null | null |
import numpy as np
def calc_mean_squared_error(y_N, yhat_N):
    r''' Compute the mean squared error given true and predicted values

    Args
    ----
    y_N : 1D array, shape (N,)
        Each entry represents 'ground truth' numeric response for an example
    yhat_N : 1D array, shape (N,)
        Each entry represents predicted numeric response for an example

    Returns
    -------
    mse : scalar float
        Mean squared error performance metric

        .. math:
            mse(y, \hat{y}) = \frac{1}{N} \sum_{n=1}^N (y_n - \hat{y}_n)^2

    Examples
    --------
    >>> y_N = np.asarray([-2, 0, 2], dtype=np.float64)
    >>> yhat_N = np.asarray([-4, 0, 2], dtype=np.float64)
    >>> calc_mean_squared_error(y_N, yhat_N)
    1.3333333333333333
    '''
    # Empty input would make the mean undefined (0/0); keep the original
    # contract of returning 0 in that case.
    if yhat_N.shape[0] == 0:
        return 0
    # Vectorized mean of squared residuals (equivalent to sum(...)/N for 1D).
    return np.mean(np.square(yhat_N - y_N))
# y_N = np.asarray([-2, 0, 2], dtype=np.float64)
# yhat_N = np.asarray([-4, 0, 2], dtype=np.float64)
# print(calc_mean_squared_error(y_N, yhat_N))
def calc_mean_absolute_error(y_N, yhat_N):
    r''' Compute the mean absolute error given true and predicted values

    Args
    ----
    y_N : 1D array, shape (N,)
        Each entry represents 'ground truth' numeric response for an example
    yhat_N : 1D array, shape (N,)
        Each entry represents predicted numeric response for an example

    Returns
    -------
    mae : scalar float
        Mean absolute error performance metric

        .. math:
            mae(y, \hat{y}) = \frac{1}{N} \sum_{n=1}^N | y_n - \hat{y}_n |

    Examples
    --------
    >>> y_N = np.asarray([-2, 0, 2], dtype=np.float64)
    >>> yhat_N = np.asarray([-4, 0, 2], dtype=np.float64)
    >>> calc_mean_absolute_error(y_N, yhat_N)
    0.6666666666666666
    '''
    # Empty input would make the mean undefined (0/0); keep the original
    # contract of returning 0 in that case.
    if yhat_N.shape[0] == 0:
        return 0
    # np.abs instead of builtin abs: explicit elementwise absolute value,
    # then the vectorized mean (equivalent to sum(...)/N for 1D input).
    return np.mean(np.abs(yhat_N - y_N))
# y_N = np.asarray([-2, 0, 2], dtype=np.float64)
# yhat_N = np.asarray([-4, 0, 2], dtype=np.float64)
# print(calc_mean_absolute_error(y_N, yhat_N))
| 29.705882
| 76
| 0.604455
| 319
| 2,020
| 3.652038
| 0.188088
| 0.030901
| 0.06867
| 0.061803
| 0.830043
| 0.830043
| 0.830043
| 0.830043
| 0.691845
| 0.691845
| 0
| 0.060131
| 0.242574
| 2,020
| 67
| 77
| 30.149254
| 0.701307
| 0.753465
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0
| 0.777778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
5578ae61eb7e776244850fe77af275498047394e
| 108
|
py
|
Python
|
maci/replay_buffers/__init__.py
|
bbrito/mapr2
|
5aa1a4c85c28918d9f16e5544793bf5574d7c49e
|
[
"Apache-2.0"
] | 35
|
2019-01-13T17:55:03.000Z
|
2022-02-23T17:06:53.000Z
|
maci/replay_buffers/__init__.py
|
arita37/mapr2
|
57f76875a4a6aed1850d3fb8604683bfe8a0e09b
|
[
"Apache-2.0"
] | 18
|
2019-03-10T23:12:00.000Z
|
2022-03-21T22:17:09.000Z
|
maci/replay_buffers/__init__.py
|
arita37/mapr2
|
57f76875a4a6aed1850d3fb8604683bfe8a0e09b
|
[
"Apache-2.0"
] | 19
|
2019-01-13T20:47:00.000Z
|
2021-11-09T05:59:13.000Z
|
from .simple_replay_buffer import SimpleReplayBuffer
from .indexed_replay_buffer import IndexedReplayBuffer
| 36
| 54
| 0.907407
| 12
| 108
| 7.833333
| 0.666667
| 0.255319
| 0.382979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 108
| 2
| 55
| 54
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5592bfaf4167cedf4bce5bcebe4e8c5d5656fab6
| 77,533
|
py
|
Python
|
Midas/test/valetest.py
|
Ivo-Balbaert/Vale
|
8df47e5d953b5c623a25ae4e8e494202fb736dab
|
[
"Apache-2.0"
] | null | null | null |
Midas/test/valetest.py
|
Ivo-Balbaert/Vale
|
8df47e5d953b5c623a25ae4e8e494202fb736dab
|
[
"Apache-2.0"
] | null | null | null |
Midas/test/valetest.py
|
Ivo-Balbaert/Vale
|
8df47e5d953b5c623a25ae4e8e494202fb736dab
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import subprocess
import platform
import os.path
import os
import sys
import shutil
import glob
from typing import Dict, Any, List, Callable
def procrun(args: List[str], **kwargs) -> subprocess.CompletedProcess:
    """Execute *args* as a child process, capturing stdout/stderr as text.

    Extra keyword arguments are forwarded unchanged to subprocess.run.
    """
    # print("Running: " + " ".join(args))  # handy when debugging test failures
    completed = subprocess.run(args, capture_output=True, text=True, **kwargs)
    return completed
# Location of the Vale sample programs used by the tests — presumably relative
# to the directory the suite is run from (TODO confirm against the build setup).
PATH_TO_SAMPLES = "../Valestrom/Samples/test/main/resources/"
class ValeTest(unittest.TestCase):
GENPATH: str = os.environ.get('GENPATH', ".")
def valec(self,
          in_filepaths: List[str],
          o_files_dir: str,
          exe_name: str,
          region_override: str) -> subprocess.CompletedProcess:
    """Invoke the valec.py driver to build the given .vale files.

    Build artifacts and exports are written to o_files_dir; the produced
    executable is named exe_name. Returns the driver's CompletedProcess.
    """
    assert self.GENPATH
    # Windows ships Python as "python"; elsewhere we must say "python3".
    interpreter = "python" if self.windows else "python3"
    command = [
        interpreter,
        f"{self.GENPATH}/valec.py",
        "build",
        "--verify",
        "--llvmir",
        "--census",
        "--flares",
        "--region-override", region_override,
        "--output-dir", o_files_dir,
        "--exports-dir", o_files_dir,
        "--add-exports-include-path",
        "-o",
        exe_name,
    ]
    command.extend(in_filepaths)
    return procrun(command)
def exec(self, exe_file: str) -> subprocess.CompletedProcess:
    """Run a previously built test executable and capture its output."""
    return procrun(["./" + exe_file])
@classmethod
def setUpClass(cls) -> None:
    """Report once, on stderr, which valec checkout the suite will use."""
    notice = (
        f"Using valec from {cls.GENPATH}. " +
        "Set GENPATH env var if this is incorrect"
    )
    print(notice, file=sys.stderr)
def setUp(self) -> None:
    """Cache per-test configuration: valec location and host-OS flag."""
    cls = type(self)
    self.GENPATH: str = cls.GENPATH
    self.windows = (platform.system() == 'Windows')
def compile_and_execute(
    self, in_filepaths: List[str], region_override: str) -> subprocess.CompletedProcess:
    """Compile the given .vale files, then run the resulting executable.

    Fails the test immediately if compilation does not exit with code 0;
    otherwise returns the CompletedProcess of the built program.
    """
    main_path = in_filepaths[0]
    # Name the build directory and executable after the first source file.
    base_name = os.path.splitext(os.path.basename(main_path))[0]
    build_dir = f"test/test_build/{base_name}_build"
    build_proc = self.valec(in_filepaths, build_dir, base_name, region_override)
    self.assertEqual(build_proc.returncode, 0,
                     f"valec couldn't compile {in_filepaths}:\n" +
                     build_proc.stdout + "\n" + build_proc.stderr)
    exe_file = f"{build_dir}/{base_name}"
    return self.exec(exe_file)
def compile_and_execute_and_expect_return_code(
    self, vale_files: List[str], region_override: str, expected_return_code) -> None:
    """Build + run the given program and assert on its exit status."""
    run_proc = self.compile_and_execute(vale_files, region_override)
    # print(run_proc.stdout); print(run_proc.stderr)  # uncomment to debug
    self.assertEqual(
        run_proc.returncode, expected_return_code,
        f"Unexpected result: {run_proc.returncode}\n" + run_proc.stdout + run_proc.stderr)
# Smoke tests: each compiles one sample program under a specific memory-region
# strategy and asserts on the program's exit code.
# mutswaplocals.vale is expected to exit with code 42 under every region.
def test_assist_mutswaplocals(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutswaplocals.vale"], "assist", 42)
def test_unsafefast_mutswaplocals(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutswaplocals.vale"], "unsafe-fast", 42)
def test_resilientv0_mutswaplocals(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutswaplocals.vale"], "resilient-v0", 42)
def test_resilientv1_mutswaplocals(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutswaplocals.vale"], "resilient-v1", 42)
def test_resilientv2_mutswaplocals(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutswaplocals.vale"], "resilient-v2", 42)
def test_resilientv3_mutswaplocals(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutswaplocals.vale"], "resilient-v3", 42)
def test_naiverc_mutswaplocals(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutswaplocals.vale"], "naive-rc", 42)
# addret.vale is expected to exit with code 7 under every region.
def test_assist_addret(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/addret.vale"], "assist", 7)
def test_unsafefast_addret(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/addret.vale"], "unsafe-fast", 7)
def test_resilientv0_addret(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/addret.vale"], "resilient-v0", 7)
def test_resilientv1_addret(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/addret.vale"], "resilient-v1", 7)
def test_resilientv2_addret(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/addret.vale"], "resilient-v2", 7)
def test_resilientv3_addret(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/addret.vale"], "resilient-v3", 7)
def test_naiverc_addret(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/addret.vale"], "naive-rc", 7)
def test_assist_immstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/immstruct.vale"], "assist", 5)
def test_unsafefast_immstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/immstruct.vale"], "unsafe-fast", 5)
def test_resilientv0_immstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/immstruct.vale"], "resilient-v0", 5)
def test_resilientv1_immstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/immstruct.vale"], "resilient-v1", 5)
def test_resilientv2_immstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/immstruct.vale"], "resilient-v2", 5)
def test_resilientv3_immstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/immstruct.vale"], "resilient-v3", 5)
def test_naiverc_immstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/immstruct.vale"], "naive-rc", 5)
def test_assist_memberrefcount(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/memberrefcount.vale"], "assist", 5)
def test_unsafefast_memberrefcount(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/memberrefcount.vale"], "unsafe-fast", 5)
def test_resilientv0_memberrefcount(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/memberrefcount.vale"], "resilient-v0", 5)
def test_resilientv1_memberrefcount(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/memberrefcount.vale"], "resilient-v1", 5)
def test_resilientv2_memberrefcount(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/memberrefcount.vale"], "resilient-v2", 5)
def test_resilientv3_memberrefcount(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/memberrefcount.vale"], "resilient-v3", 5)
def test_naiverc_memberrefcount(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/memberrefcount.vale"], "naive-rc", 5)
def test_assist_bigimmstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/bigimmstruct.vale"], "assist", 42)
def test_unsafefast_bigimmstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/bigimmstruct.vale"], "unsafe-fast", 42)
def test_resilientv0_bigimmstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/bigimmstruct.vale"], "resilient-v0", 42)
def test_resilientv1_bigimmstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/bigimmstruct.vale"], "resilient-v1", 42)
def test_resilientv2_bigimmstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/bigimmstruct.vale"], "resilient-v2", 42)
def test_resilientv3_bigimmstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/bigimmstruct.vale"], "resilient-v3", 42)
def test_naiverc_bigimmstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/bigimmstruct.vale"], "naive-rc", 42)
def test_assist_mutstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstruct.vale"], "assist", 8)
def test_unsafefast_mutstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstruct.vale"], "unsafe-fast", 8)
def test_resilientv0_mutstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstruct.vale"], "resilient-v0", 8)
def test_resilientv1_mutstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstruct.vale"], "resilient-v1", 8)
def test_resilientv2_mutstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstruct.vale"], "resilient-v2", 8)
def test_resilientv3_mutstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstruct.vale"], "resilient-v3", 8)
def test_naiverc_mutstruct(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstruct.vale"], "naive-rc", 8)
def test_assist_lambda(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambda.vale"], "assist", 42)
def test_unsafefast_lambda(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambda.vale"], "unsafe-fast", 42)
def test_resilientv0_lambda(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambda.vale"], "resilient-v0", 42)
def test_resilientv1_lambda(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambda.vale"], "resilient-v1", 42)
def test_resilientv2_lambda(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambda.vale"], "resilient-v2", 42)
def test_resilientv3_lambda(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambda.vale"], "resilient-v3", 42)
def test_naiverc_lambda(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambda.vale"], "naive-rc", 42)
def test_assist_if(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/if.vale"], "assist", 42)
def test_unsafefast_if(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/if.vale"], "unsafe-fast", 42)
def test_resilientv0_if(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/if.vale"], "resilient-v0", 42)
def test_resilientv1_if(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/if.vale"], "resilient-v1", 42)
def test_resilientv2_if(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/if.vale"], "resilient-v2", 42)
def test_resilientv3_if(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/if.vale"], "resilient-v3", 42)
def test_naiverc_if(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/if.vale"], "naive-rc", 42)
def test_assist_upcastif(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/upcastif.vale"], "assist", 42)
def test_unsafefast_upcastif(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/upcastif.vale"], "unsafe-fast", 42)
def test_resilientv0_upcastif(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/upcastif.vale"], "resilient-v0", 42)
def test_resilientv1_upcastif(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/upcastif.vale"], "resilient-v1", 42)
def test_resilientv2_upcastif(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/upcastif.vale"], "resilient-v2", 42)
def test_resilientv3_upcastif(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/upcastif.vale"], "resilient-v3", 42)
def test_naiverc_upcastif(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/upcastif.vale"], "naive-rc", 42)
def test_assist_ifnevers(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/ifnevers.vale"], "assist", 42)
def test_unsafefast_ifnevers(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/ifnevers.vale"], "unsafe-fast", 42)
def test_resilientv0_ifnevers(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/ifnevers.vale"], "resilient-v0", 42)
def test_resilientv1_ifnevers(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/ifnevers.vale"], "resilient-v1", 42)
def test_resilientv2_ifnevers(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/ifnevers.vale"], "resilient-v2", 42)
def test_resilientv3_ifnevers(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/ifnevers.vale"], "resilient-v3", 42)
def test_naiverc_ifnevers(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/ifnevers.vale"], "naive-rc", 42)
def test_assist_mutlocal(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutlocal.vale"], "assist", 42)
def test_unsafefast_mutlocal(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutlocal.vale"], "unsafe-fast", 42)
def test_resilientv0_mutlocal(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutlocal.vale"], "resilient-v0", 42)
def test_resilientv1_mutlocal(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutlocal.vale"], "resilient-v1", 42)
def test_resilientv2_mutlocal(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutlocal.vale"], "resilient-v2", 42)
def test_resilientv3_mutlocal(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutlocal.vale"], "resilient-v3", 42)
def test_naiverc_mutlocal(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/mutlocal.vale"], "naive-rc", 42)
def test_assist_while(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/while/while.vale"], "assist", 42)
def test_unsafefast_while(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/while/while.vale"], "unsafe-fast", 42)
def test_resilientv0_while(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/while/while.vale"], "resilient-v0", 42)
def test_resilientv1_while(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/while/while.vale"], "resilient-v1", 42)
def test_resilientv2_while(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/while/while.vale"], "resilient-v2", 42)
def test_resilientv3_while(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/while/while.vale"], "resilient-v3", 42)
def test_naiverc_while(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/while/while.vale"], "naive-rc", 42)
def test_assist_constraintRef(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/constraintRef.vale"], "assist", 8)
def test_unsafefast_constraintRef(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/constraintRef.vale"], "unsafe-fast", 8)
def test_resilientv0_constraintRef(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/constraintRef.vale"], "resilient-v0", 8)
def test_resilientv1_constraintRef(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/constraintRef.vale"], "resilient-v1", 8)
def test_resilientv2_constraintRef(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/constraintRef.vale"], "resilient-v2", 8)
def test_resilientv3_constraintRef(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/constraintRef.vale"], "resilient-v3", 8)
def test_naiverc_constraintRef(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/constraintRef.vale"], "naive-rc", 8)
def test_assist_knownsizeimmarray(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/knownsizeimmarray.vale"], "assist", 42)
def test_unsafefast_knownsizeimmarray(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/knownsizeimmarray.vale"], "unsafe-fast", 42)
def test_resilientv0_knownsizeimmarray(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/knownsizeimmarray.vale"], "resilient-v0", 42)
def test_resilientv1_knownsizeimmarray(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/knownsizeimmarray.vale"], "resilient-v1", 42)
def test_resilientv2_knownsizeimmarray(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/knownsizeimmarray.vale"], "resilient-v2", 42)
def test_resilientv3_knownsizeimmarray(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/knownsizeimmarray.vale"], "resilient-v3", 42)
def test_naiverc_knownsizeimmarray(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/knownsizeimmarray.vale"], "naive-rc", 42)
def test_assist_imminterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/imminterface.vale"], "assist", 42)
def test_unsafefast_imminterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/imminterface.vale"], "unsafe-fast", 42)
def test_resilientv0_imminterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/imminterface.vale"], "resilient-v0", 42)
def test_resilientv1_imminterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/imminterface.vale"], "resilient-v1", 42)
def test_resilientv2_imminterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/imminterface.vale"], "resilient-v2", 42)
def test_resilientv3_imminterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/imminterface.vale"], "resilient-v3", 42)
def test_naiverc_imminterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/imminterface.vale"], "naive-rc", 42)
def test_assist_mutinterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/mutinterface.vale"], "assist", 42)
def test_unsafefast_mutinterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/mutinterface.vale"], "unsafe-fast", 42)
def test_resilientv0_mutinterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/mutinterface.vale"], "resilient-v0", 42)
def test_resilientv1_mutinterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/mutinterface.vale"], "resilient-v1", 42)
def test_resilientv2_mutinterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/mutinterface.vale"], "resilient-v2", 42)
def test_resilientv3_mutinterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/mutinterface.vale"], "resilient-v3", 42)
def test_naiverc_mutinterface(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/virtuals/mutinterface.vale"], "naive-rc", 42)
def test_assist_mutstructstore(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstore.vale"], "assist", 42)
def test_unsafefast_mutstructstore(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstore.vale"], "unsafe-fast", 42)
def test_resilientv0_mutstructstore(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstore.vale"], "resilient-v0", 42)
def test_resilientv1_mutstructstore(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstore.vale"], "resilient-v1", 42)
def test_resilientv2_mutstructstore(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstore.vale"], "resilient-v2", 42)
def test_resilientv3_mutstructstore(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstore.vale"], "resilient-v3", 42)
def test_naiverc_mutstructstore(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstore.vale"], "naive-rc", 42)
def test_assist_mutstructstoreinner(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstoreinner.vale"], "assist", 42)
def test_unsafefast_mutstructstoreinner(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstoreinner.vale"], "unsafe-fast", 42)
def test_resilientv0_mutstructstoreinner(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstoreinner.vale"], "resilient-v0", 42)
def test_resilientv1_mutstructstoreinner(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstoreinner.vale"], "resilient-v1", 42)
def test_resilientv2_mutstructstoreinner(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstoreinner.vale"], "resilient-v2", 42)
def test_resilientv3_mutstructstoreinner(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstoreinner.vale"], "resilient-v3", 42)
def test_naiverc_mutstructstoreinner(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/structs/mutstructstoreinner.vale"], "naive-rc", 42)
def test_assist_immusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusa.vale"], "assist", 3)
def test_unsafefast_immusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusa.vale"], "unsafe-fast", 3)
def test_resilientv0_immusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusa.vale"], "resilient-v0", 3)
def test_resilientv1_immusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusa.vale"], "resilient-v1", 3)
def test_resilientv2_immusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusa.vale"], "resilient-v2", 3)
def test_resilientv3_immusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusa.vale"], "resilient-v3", 3)
def test_naiverc_immusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusa.vale"], "naive-rc", 3)
# def test_assist_externimmusa(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/externimmusa.vale", PATH_TO_SAMPLES + "programs/arrays/externimmusa.c"], "assist", 15)
# def test_unsafefast_externimmusa(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/externimmusa.vale", PATH_TO_SAMPLES + "programs/arrays/externimmusa.c"], "unsafe-fast", 15)
# def test_resilientv0_externimmusa(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/externimmusa.vale", PATH_TO_SAMPLES + "programs/arrays/externimmusa.c"], "resilient-v0", 15)
# def test_resilientv1_externimmusa(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/externimmusa.vale", PATH_TO_SAMPLES + "programs/arrays/externimmusa.c"], "resilient-v1", 15)
# def test_resilientv2_externimmusa(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/externimmusa.vale", PATH_TO_SAMPLES + "programs/arrays/externimmusa.c"], "resilient-v2", 15)
# def test_resilientv3_externimmusa(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/externimmusa.vale", PATH_TO_SAMPLES + "programs/arrays/externimmusa.c"], "resilient-v3", 15)
# def test_naiverc_externimmusa(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/externimmusa.vale", PATH_TO_SAMPLES + "programs/arrays/externimmusa.c"], "naive-rc", 15)
def test_assist_immusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusalen.vale"], "assist", 5)
def test_unsafefast_immusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusalen.vale"], "unsafe-fast", 5)
def test_resilientv0_immusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusalen.vale"], "resilient-v0", 5)
def test_resilientv1_immusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusalen.vale"], "resilient-v1", 5)
def test_resilientv2_immusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusalen.vale"], "resilient-v2", 5)
def test_resilientv3_immusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusalen.vale"], "resilient-v3", 5)
def test_naiverc_immusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/immusalen.vale"], "naive-rc", 5)
def test_assist_mutusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusa.vale"], "assist", 3)
def test_unsafefast_mutusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusa.vale"], "unsafe-fast", 3)
def test_resilientv0_mutusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusa.vale"], "resilient-v0", 3)
def test_resilientv1_mutusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusa.vale"], "resilient-v1", 3)
def test_resilientv2_mutusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusa.vale"], "resilient-v2", 3)
def test_resilientv3_mutusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusa.vale"], "resilient-v3", 3)
def test_naiverc_mutusa(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusa.vale"], "naive-rc", 3)
def test_assist_mutusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusalen.vale"], "assist", 5)
def test_unsafefast_mutusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusalen.vale"], "unsafe-fast", 5)
def test_resilientv0_mutusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusalen.vale"], "resilient-v0", 5)
def test_resilientv1_mutusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusalen.vale"], "resilient-v1", 5)
def test_resilientv2_mutusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusalen.vale"], "resilient-v2", 5)
def test_resilientv3_mutusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusalen.vale"], "resilient-v3", 5)
def test_naiverc_mutusalen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/arrays/mutusalen.vale"], "naive-rc", 5)
def test_assist_stradd(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/stradd.vale"], "assist", 42)
def test_unsafefast_stradd(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/stradd.vale"], "unsafe-fast", 42)
def test_resilientv0_stradd(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/stradd.vale"], "resilient-v0", 42)
def test_resilientv1_stradd(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/stradd.vale"], "resilient-v1", 42)
def test_resilientv2_stradd(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/stradd.vale"], "resilient-v2", 42)
def test_resilientv3_stradd(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/stradd.vale"], "resilient-v3", 42)
def test_naiverc_stradd(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/stradd.vale"], "naive-rc", 42)
def test_assist_strneq(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strneq.vale"], "assist", 42)
def test_unsafefast_strneq(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strneq.vale"], "unsafe-fast", 42)
def test_resilientv0_strneq(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strneq.vale"], "resilient-v0", 42)
def test_resilientv1_strneq(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strneq.vale"], "resilient-v1", 42)
def test_resilientv2_strneq(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strneq.vale"], "resilient-v2", 42)
def test_resilientv3_strneq(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strneq.vale"], "resilient-v3", 42)
def test_naiverc_strneq(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strneq.vale"], "naive-rc", 42)
def test_assist_lambdamut(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambdamut.vale"], "assist", 42)
def test_unsafefast_lambdamut(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambdamut.vale"], "unsafe-fast", 42)
def test_resilientv0_lambdamut(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambdamut.vale"], "resilient-v0", 42)
def test_resilientv1_lambdamut(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambdamut.vale"], "resilient-v1", 42)
def test_resilientv2_lambdamut(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambdamut.vale"], "resilient-v2", 42)
def test_resilientv3_lambdamut(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambdamut.vale"], "resilient-v3", 42)
def test_naiverc_lambdamut(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/lambdas/lambdamut.vale"], "naive-rc", 42)
def test_assist_strprint(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strprint.vale"], "assist", 42)
def test_unsafefast_strprint(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strprint.vale"], "unsafe-fast", 42)
def test_resilientv0_strprint(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strprint.vale"], "resilient-v0", 42)
def test_resilientv1_strprint(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strprint.vale"], "resilient-v1", 42)
def test_resilientv2_strprint(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strprint.vale"], "resilient-v2", 42)
def test_resilientv3_strprint(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strprint.vale"], "resilient-v3", 42)
def test_naiverc_strprint(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strprint.vale"], "naive-rc", 42)
# inttostr: each region mode should run the sample and exit with code 4.
def test_assist_inttostr(self) -> None:
    path = PATH_TO_SAMPLES + "programs/strings/inttostr.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 4)
def test_unsafefast_inttostr(self) -> None:
    path = PATH_TO_SAMPLES + "programs/strings/inttostr.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 4)
def test_resilientv0_inttostr(self) -> None:
    path = PATH_TO_SAMPLES + "programs/strings/inttostr.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 4)
def test_resilientv1_inttostr(self) -> None:
    path = PATH_TO_SAMPLES + "programs/strings/inttostr.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 4)
def test_resilientv2_inttostr(self) -> None:
    path = PATH_TO_SAMPLES + "programs/strings/inttostr.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 4)
def test_resilientv3_inttostr(self) -> None:
    path = PATH_TO_SAMPLES + "programs/strings/inttostr.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 4)
def test_naiverc_inttostr(self) -> None:
    path = PATH_TO_SAMPLES + "programs/strings/inttostr.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 4)
# nestedif: each region mode should run the sample and exit with code 42.
def test_assist_nestedif(self) -> None:
    path = PATH_TO_SAMPLES + "programs/if/nestedif.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_nestedif(self) -> None:
    path = PATH_TO_SAMPLES + "programs/if/nestedif.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_nestedif(self) -> None:
    path = PATH_TO_SAMPLES + "programs/if/nestedif.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_nestedif(self) -> None:
    path = PATH_TO_SAMPLES + "programs/if/nestedif.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_nestedif(self) -> None:
    path = PATH_TO_SAMPLES + "programs/if/nestedif.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_nestedif(self) -> None:
    path = PATH_TO_SAMPLES + "programs/if/nestedif.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_nestedif(self) -> None:
    path = PATH_TO_SAMPLES + "programs/if/nestedif.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# unstackifyret: each region mode should run the sample and exit with code 42.
def test_assist_unstackifyret(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unstackifyret.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_unstackifyret(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unstackifyret.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_unstackifyret(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unstackifyret.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_unstackifyret(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unstackifyret.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_unstackifyret(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unstackifyret.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_unstackifyret(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unstackifyret.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_unstackifyret(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unstackifyret.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# swapmutusadestroy: each region mode should run the sample and exit with code 42.
def test_assist_swapmutusadestroy(self) -> None:
    path = PATH_TO_SAMPLES + "programs/arrays/swapmutusadestroy.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_swapmutusadestroy(self) -> None:
    path = PATH_TO_SAMPLES + "programs/arrays/swapmutusadestroy.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_swapmutusadestroy(self) -> None:
    path = PATH_TO_SAMPLES + "programs/arrays/swapmutusadestroy.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_swapmutusadestroy(self) -> None:
    path = PATH_TO_SAMPLES + "programs/arrays/swapmutusadestroy.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_swapmutusadestroy(self) -> None:
    path = PATH_TO_SAMPLES + "programs/arrays/swapmutusadestroy.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_swapmutusadestroy(self) -> None:
    path = PATH_TO_SAMPLES + "programs/arrays/swapmutusadestroy.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_swapmutusadestroy(self) -> None:
    path = PATH_TO_SAMPLES + "programs/arrays/swapmutusadestroy.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# unreachablemoot: each region mode should run the sample and exit with code 42.
def test_assist_unreachablemoot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unreachablemoot.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_unreachablemoot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unreachablemoot.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_unreachablemoot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unreachablemoot.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_unreachablemoot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unreachablemoot.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_unreachablemoot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unreachablemoot.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_unreachablemoot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unreachablemoot.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_unreachablemoot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/unreachablemoot.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# panic: each region mode should run the sample and exit with code 255.
def test_assist_panic(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panic.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 255)
def test_unsafefast_panic(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panic.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 255)
def test_resilientv0_panic(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panic.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 255)
def test_resilientv1_panic(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panic.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 255)
def test_resilientv2_panic(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panic.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 255)
def test_resilientv3_panic(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panic.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 255)
def test_naiverc_panic(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panic.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 255)
# panicnot: each region mode should run the sample and exit with code 42.
def test_assist_panicnot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panicnot.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_panicnot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panicnot.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_panicnot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panicnot.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_panicnot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panicnot.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_panicnot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panicnot.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_panicnot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panicnot.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_panicnot(self) -> None:
    path = PATH_TO_SAMPLES + "programs/panicnot.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# nestedblocks: each region mode should run the sample and exit with code 42.
def test_assist_nestedblocks(self) -> None:
    path = PATH_TO_SAMPLES + "programs/nestedblocks.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_nestedblocks(self) -> None:
    path = PATH_TO_SAMPLES + "programs/nestedblocks.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_nestedblocks(self) -> None:
    path = PATH_TO_SAMPLES + "programs/nestedblocks.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_nestedblocks(self) -> None:
    path = PATH_TO_SAMPLES + "programs/nestedblocks.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_nestedblocks(self) -> None:
    path = PATH_TO_SAMPLES + "programs/nestedblocks.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_nestedblocks(self) -> None:
    path = PATH_TO_SAMPLES + "programs/nestedblocks.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_nestedblocks(self) -> None:
    path = PATH_TO_SAMPLES + "programs/nestedblocks.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# dropThenLockStruct: each region mode should run the sample and exit with code 42.
def test_assist_weakDropThenLockStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_weakDropThenLockStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_weakDropThenLockStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_weakDropThenLockStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_weakDropThenLockStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_weakDropThenLockStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_weakDropThenLockStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# lockWhileLiveStruct: each region mode should run the sample and exit with code 7.
def test_assist_weakLockWhileLiveStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 7)
def test_unsafefast_weakLockWhileLiveStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 7)
def test_resilientv0_weakLockWhileLiveStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 7)
def test_resilientv1_weakLockWhileLiveStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 7)
def test_resilientv2_weakLockWhileLiveStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 7)
def test_resilientv3_weakLockWhileLiveStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 7)
def test_naiverc_weakLockWhileLiveStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 7)
# weakFromLocalCRefStruct: each region mode should run the sample and exit with code 7.
def test_assist_weakFromLocalCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 7)
def test_unsafefast_weakFromLocalCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 7)
def test_resilientv0_weakFromLocalCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 7)
def test_resilientv1_weakFromLocalCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 7)
def test_resilientv2_weakFromLocalCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 7)
def test_resilientv3_weakFromLocalCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 7)
def test_naiverc_weakFromLocalCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 7)
# weakFromCRefStruct: each region mode should run the sample and exit with code 7.
def test_assist_weakFromCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 7)
def test_unsafefast_weakFromCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 7)
def test_resilientv0_weakFromCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 7)
def test_resilientv1_weakFromCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 7)
def test_resilientv2_weakFromCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 7)
def test_resilientv3_weakFromCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 7)
def test_naiverc_weakFromCRefStruct(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefStruct.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 7)
# loadFromWeakable: each region mode should run the sample and exit with code 7.
def test_assist_loadFromWeakable(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/loadFromWeakable.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 7)
def test_unsafefast_loadFromWeakable(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/loadFromWeakable.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 7)
def test_resilientv0_loadFromWeakable(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/loadFromWeakable.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 7)
def test_resilientv1_loadFromWeakable(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/loadFromWeakable.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 7)
def test_resilientv2_loadFromWeakable(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/loadFromWeakable.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 7)
def test_resilientv3_loadFromWeakable(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/loadFromWeakable.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 7)
def test_naiverc_loadFromWeakable(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/loadFromWeakable.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 7)
# dropThenLockInterface: each region mode should run the sample and exit with code 42.
def test_assist_weakDropThenLockInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_weakDropThenLockInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_weakDropThenLockInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_weakDropThenLockInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_weakDropThenLockInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_weakDropThenLockInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_weakDropThenLockInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/dropThenLockInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# lockWhileLiveInterface: each region mode should run the sample and exit with code 7.
def test_assist_weakLockWhileLiveInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 7)
def test_unsafefast_weakLockWhileLiveInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 7)
def test_resilientv0_weakLockWhileLiveInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 7)
def test_resilientv1_weakLockWhileLiveInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 7)
def test_resilientv2_weakLockWhileLiveInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 7)
def test_resilientv3_weakLockWhileLiveInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 7)
def test_naiverc_weakLockWhileLiveInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/lockWhileLiveInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 7)
# weakFromLocalCRefInterface: each region mode should run the sample and exit with code 7.
def test_assist_weakFromLocalCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 7)
def test_unsafefast_weakFromLocalCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 7)
def test_resilientv0_weakFromLocalCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 7)
def test_resilientv1_weakFromLocalCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 7)
def test_resilientv2_weakFromLocalCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 7)
def test_resilientv3_weakFromLocalCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 7)
def test_naiverc_weakFromLocalCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromLocalCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 7)
# weakFromCRefInterface: each region mode should run the sample and exit with code 7.
def test_assist_weakFromCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 7)
def test_unsafefast_weakFromCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 7)
def test_resilientv0_weakFromCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 7)
def test_resilientv1_weakFromCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 7)
def test_resilientv2_weakFromCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 7)
def test_resilientv3_weakFromCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 7)
def test_naiverc_weakFromCRefInterface(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/weakFromCRefInterface.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 7)
# callWeakSelfMethodWhileLive: each region mode should run the sample and exit with code 42.
def test_assist_weakSelfMethodCallWhileLive(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodWhileLive.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 42)
def test_unsafefast_weakSelfMethodCallWhileLive(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodWhileLive.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 42)
def test_resilientv0_weakSelfMethodCallWhileLive(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodWhileLive.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 42)
def test_resilientv1_weakSelfMethodCallWhileLive(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodWhileLive.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 42)
def test_resilientv2_weakSelfMethodCallWhileLive(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodWhileLive.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 42)
def test_resilientv3_weakSelfMethodCallWhileLive(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodWhileLive.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 42)
def test_naiverc_weakSelfMethodCallWhileLive(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodWhileLive.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 42)
# callWeakSelfMethodAfterDrop: each region mode should run the sample and exit with code 0.
def test_assist_weakSelfMethodCallAfterDrop(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodAfterDrop.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 0)
def test_unsafefast_weakSelfMethodCallAfterDrop(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodAfterDrop.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 0)
def test_resilientv0_weakSelfMethodCallAfterDrop(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodAfterDrop.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 0)
def test_resilientv1_weakSelfMethodCallAfterDrop(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodAfterDrop.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 0)
def test_resilientv2_weakSelfMethodCallAfterDrop(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodAfterDrop.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 0)
def test_resilientv3_weakSelfMethodCallAfterDrop(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodAfterDrop.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 0)
def test_naiverc_weakSelfMethodCallAfterDrop(self) -> None:
    path = PATH_TO_SAMPLES + "programs/weaks/callWeakSelfMethodAfterDrop.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 0)
# extern: each region mode should run the sample and exit with code 4.
def test_assist_extern(self) -> None:
    path = PATH_TO_SAMPLES + "programs/externs/extern.vale"
    self.compile_and_execute_and_expect_return_code([path], "assist", 4)
def test_unsafefast_extern(self) -> None:
    path = PATH_TO_SAMPLES + "programs/externs/extern.vale"
    self.compile_and_execute_and_expect_return_code([path], "unsafe-fast", 4)
def test_resilientv0_extern(self) -> None:
    path = PATH_TO_SAMPLES + "programs/externs/extern.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v0", 4)
def test_resilientv1_extern(self) -> None:
    path = PATH_TO_SAMPLES + "programs/externs/extern.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v1", 4)
def test_resilientv2_extern(self) -> None:
    path = PATH_TO_SAMPLES + "programs/externs/extern.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v2", 4)
def test_resilientv3_extern(self) -> None:
    path = PATH_TO_SAMPLES + "programs/externs/extern.vale"
    self.compile_and_execute_and_expect_return_code([path], "resilient-v3", 4)
def test_naiverc_extern(self) -> None:
    path = PATH_TO_SAMPLES + "programs/externs/extern.vale"
    self.compile_and_execute_and_expect_return_code([path], "naive-rc", 4)
# def test_assist_externtupleret(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleret.vale", PATH_TO_SAMPLES + "programs/externs/externtupleret.c"], "assist", 42)
# def test_unsafefast_externtupleret(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleret.vale", PATH_TO_SAMPLES + "programs/externs/externtupleret.c"], "unsafe-fast", 42)
# def test_resilientv0_externtupleret(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleret.vale", PATH_TO_SAMPLES + "programs/externs/externtupleret.c"], "resilient-v0", 42)
# def test_resilientv1_externtupleret(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleret.vale", PATH_TO_SAMPLES + "programs/externs/externtupleret.c"], "resilient-v1", 42)
# def test_resilientv2_externtupleret(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleret.vale", PATH_TO_SAMPLES + "programs/externs/externtupleret.c"], "resilient-v2", 42)
# def test_resilientv3_externtupleret(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleret.vale", PATH_TO_SAMPLES + "programs/externs/externtupleret.c"], "resilient-v3", 42)
# def test_naiverc_externtupleret(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleret.vale", PATH_TO_SAMPLES + "programs/externs/externtupleret.c"], "naive-rc", 42)
# externstructparam (Vale source + C helper) under assist: expects exit code 42.
def test_assist_externstructparam(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstructparam.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstructparam.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "assist", 42)
# def test_unsafefast_externstructparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externstructparam.vale", PATH_TO_SAMPLES + "programs/externs/externstructparam.c"], "unsafe-fast", 42)
# def test_resilientv0_externstructparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externstructparam.vale", PATH_TO_SAMPLES + "programs/externs/externstructparam.c"], "resilient-v0", 42)
# def test_resilientv1_externstructparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externstructparam.vale", PATH_TO_SAMPLES + "programs/externs/externstructparam.c"], "resilient-v1", 42)
# externstructparam (Vale source + C helper) under resilient-v2/v3: expects exit code 42.
def test_resilientv2_externstructparam(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstructparam.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstructparam.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v2", 42)
def test_resilientv3_externstructparam(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstructparam.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstructparam.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v3", 42)
# def test_naiverc_externstructparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externstructparam.vale", PATH_TO_SAMPLES + "programs/externs/externstructparam.c"], "naive-rc", 42)
# externstrlen (Vale source + C helper): each region mode expects exit code 11.
def test_assist_externstrlen(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "assist", 11)
def test_unsafefast_externstrlen(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "unsafe-fast", 11)
def test_resilientv0_externstrlen(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v0", 11)
def test_resilientv1_externstrlen(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v1", 11)
def test_resilientv2_externstrlen(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v2", 11)
def test_resilientv3_externstrlen(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v3", 11)
def test_naiverc_externstrlen(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/externstrlen.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "naive-rc", 11)
# extretmutstruct (Vale source + C helper) under assist/unsafe-fast: expects exit code 42.
def test_assist_extretmutstruct(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "assist", 42)
def test_unsafefast_extretmutstruct(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "unsafe-fast", 42)
# def test_resilientv0_extretmutstruct(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/extretmutstruct.vale", PATH_TO_SAMPLES + "programs/externs/extretmutstruct.c"], "resilient-v0", 42)
# extretmutstruct (Vale source + C helper) under resilient-v1/v2/v3: expects exit code 42.
def test_resilientv1_extretmutstruct(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v1", 42)
def test_resilientv2_extretmutstruct(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v2", 42)
def test_resilientv3_extretmutstruct(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/extretmutstruct.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v3", 42)
# def test_naiverc_extretmutstruct(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/extretmutstruct.vale", PATH_TO_SAMPLES + "programs/externs/extretmutstruct.c"], "naive-rc", 42)
# exportretvoid (Vale source + C helper): each region mode expects exit code 42.
def test_assist_exportretvoid(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "assist", 42)
def test_unsafefast_exportretvoid(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "unsafe-fast", 42)
def test_resilientv0_exportretvoid(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v0", 42)
def test_resilientv1_exportretvoid(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v1", 42)
def test_resilientv2_exportretvoid(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v2", 42)
def test_resilientv3_exportretvoid(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "resilient-v3", 42)
def test_naiverc_exportretvoid(self) -> None:
    vale_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.vale"
    c_path = PATH_TO_SAMPLES + "programs/externs/exportretvoid.c"
    self.compile_and_execute_and_expect_return_code([vale_path, c_path], "naive-rc", 42)
def test_assist_strlen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strlen.vale"], "assist", 11)
def test_unsafefast_strlen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strlen.vale"], "unsafe-fast", 11)
def test_resilientv0_strlen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strlen.vale"], "resilient-v0", 11)
def test_resilientv1_strlen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strlen.vale"], "resilient-v1", 11)
def test_resilientv2_strlen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strlen.vale"], "resilient-v2", 11)
def test_resilientv3_strlen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strlen.vale"], "resilient-v3", 11)
def test_naiverc_strlen(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/strlen.vale"], "naive-rc", 11)
def test_assist_smallstr(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/strings/smallstr.vale"], "assist", 42)
def test_assist_invalidaccess(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/invalidaccess.vale"], "assist", 255)
def test_unsafefast_invalidaccess(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/invalidaccess.vale"], "unsafe-fast", 255)
def test_resilientv0_invalidaccess(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/invalidaccess.vale"], "resilient-v0", 255)
def test_resilientv1_invalidaccess(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/invalidaccess.vale"], "resilient-v1", -11)
def test_resilientv2_invalidaccess(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/invalidaccess.vale"], "resilient-v2", 255)
def test_resilientv3_invalidaccess(self) -> None:
self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/invalidaccess.vale"], "resilient-v3", -11)
# def test_naiverc_invalidaccess(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/invalidaccess.vale"], "naive-rc", 255)
# def test_assist_neverif(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/neverif.vale"], "assist", 42)
# def test_unsafefast_neverif(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/neverif.vale"], "unsafe-fast", 42)
# def test_resilientv0_neverif(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/neverif.vale"], "resilient-v0", 42)
# def test_resilientv1_neverif(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/neverif.vale"], "resilient-v1", 42)
# def test_resilientv2_neverif(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/neverif.vale"], "resilient-v2", 42)
# def test_resilientv3_neverif(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/neverif.vale"], "resilient3v2", 42)
# def test_naiverc_neverif(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/if/neverif.vale"], "naive-rc", 42)
# def test_assist_externtupleparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleparam.vale", PATH_TO_SAMPLES + "programs/externs/externtupleparam.c"], "assist", 42)
# def test_unsafefast_externtupleparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleparam.vale", PATH_TO_SAMPLES + "programs/externs/externtupleparam.c"], "unsafe-fast", 42)
# def test_resilientv0_externtupleparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleparam.vale", PATH_TO_SAMPLES + "programs/externs/externtupleparam.c"], "resilient-v0", 42)
# def test_resilientv1_externtupleparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleparam.vale", PATH_TO_SAMPLES + "programs/externs/externtupleparam.c"], "resilient-v1", 42)
# def test_resilientv2_externtupleparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleparam.vale", PATH_TO_SAMPLES + "programs/externs/externtupleparam.c"], "resilient-v2", 42)
# def test_resilientv3_externtupleparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleparam.vale", PATH_TO_SAMPLES + "programs/externs/externtupleparam.c"], "resilient3v2", 42)
# def test_naiverc_externtupleparam(self) -> None:
# self.compile_and_execute_and_expect_return_code([PATH_TO_SAMPLES + "programs/externs/externtupleparam.vale", PATH_TO_SAMPLES + "programs/externs/externtupleparam.c"], "naive-rc", 42)
if __name__ == '__main__':
unittest.main()
| 83.368817
| 198
| 0.765377
| 9,975
| 77,533
| 5.538045
| 0.020652
| 0.048116
| 0.10425
| 0.168024
| 0.966855
| 0.963416
| 0.933149
| 0.807339
| 0.807339
| 0.807339
| 0
| 0.016403
| 0.120117
| 77,533
| 929
| 199
| 83.458558
| 0.79336
| 0.099816
| 0
| 0
| 0
| 0
| 0.234682
| 0.171314
| 0
| 0
| 0
| 0
| 0.003836
| 1
| 0.466752
| false
| 0
| 0.011509
| 0.002558
| 0.485934
| 0.019182
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e9c367e00c70259b7e089a1fb39f160c11d5512c
| 82
|
py
|
Python
|
scripts/deploy_mocks.py
|
murano500k/smart_contract_lottery
|
92f6fcf5cbd2335db10fd1239646ba8d16e55e63
|
[
"MIT"
] | 32
|
2021-08-02T14:30:06.000Z
|
2022-03-28T09:22:27.000Z
|
scripts/deploy_mocks.py
|
murano500k/smart_contract_lottery
|
92f6fcf5cbd2335db10fd1239646ba8d16e55e63
|
[
"MIT"
] | 53
|
2021-09-20T18:23:41.000Z
|
2022-03-26T18:26:58.000Z
|
scripts/deploy_mocks.py
|
murano500k/smart_contract_lottery
|
92f6fcf5cbd2335db10fd1239646ba8d16e55e63
|
[
"MIT"
] | 66
|
2021-06-06T16:18:02.000Z
|
2022-03-28T07:24:47.000Z
|
from scripts.helpful_scripts import deploy_mocks
def main():
deploy_mocks()
| 13.666667
| 48
| 0.768293
| 11
| 82
| 5.454545
| 0.727273
| 0.366667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158537
| 82
| 5
| 49
| 16.4
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e9d6653ec3bf0f1c716523ccbe2157e4f1aae626
| 47
|
py
|
Python
|
milkCan/__init__.py
|
v2thegreat/milkCan
|
6e98df6f2b18c56aced308bae6b14ebdc900db3e
|
[
"MIT"
] | null | null | null |
milkCan/__init__.py
|
v2thegreat/milkCan
|
6e98df6f2b18c56aced308bae6b14ebdc900db3e
|
[
"MIT"
] | null | null | null |
milkCan/__init__.py
|
v2thegreat/milkCan
|
6e98df6f2b18c56aced308bae6b14ebdc900db3e
|
[
"MIT"
] | null | null | null |
from milkCan import milkCan
import quickTests
| 11.75
| 27
| 0.851064
| 6
| 47
| 6.666667
| 0.666667
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 3
| 28
| 15.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
75e4d4ad2e8057960779610f36cbb5a92e3d2082
| 162
|
py
|
Python
|
video.py
|
ArtSantana/python-video-converter
|
c7e704033017d591f3733e94a44cbf1564d362da
|
[
"MIT"
] | null | null | null |
video.py
|
ArtSantana/python-video-converter
|
c7e704033017d591f3733e94a44cbf1564d362da
|
[
"MIT"
] | null | null | null |
video.py
|
ArtSantana/python-video-converter
|
c7e704033017d591f3733e94a44cbf1564d362da
|
[
"MIT"
] | null | null | null |
class Video:
def __init__(self, files):
self.files = files
def setFiles(self, files):
self.files = files
def getFiles(self):
return self.files
| 18
| 28
| 0.666667
| 22
| 162
| 4.727273
| 0.409091
| 0.432692
| 0.25
| 0.346154
| 0.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228395
| 162
| 9
| 29
| 18
| 0.832
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0
| 0.142857
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
f96d35234a53cd83f18ac3c80b689cbdba19f166
| 18,746
|
py
|
Python
|
examples/nowcoder/SQL7/tests.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 5
|
2020-07-14T07:48:10.000Z
|
2021-12-20T21:20:10.000Z
|
examples/nowcoder/SQL7/tests.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 7
|
2021-03-26T03:13:38.000Z
|
2022-03-12T00:42:03.000Z
|
examples/nowcoder/SQL7/tests.py
|
zhengtong0898/django-decode
|
69680853a4a5b07f6a9c4b65c7d86b2d401a92b1
|
[
"MIT"
] | 1
|
2021-02-16T07:04:25.000Z
|
2021-02-16T07:04:25.000Z
|
from datetime import date
from django.db import connections
from django.test import TestCase, TransactionTestCase
from .models import salaries
from django.db.models.aggregates import Count
# Create your tests here.
class SimpleTest(TransactionTestCase):
reset_sequences = True
def prepare_data(self):
# 建表语句
# CREATE TABLE `sql7_salaries` (
# `emp_no` INT ( 11 ) NOT NULL AUTO_INCREMENT,
# `salary` INT ( 11 ) NOT NULL,
# `from_date` date NOT NULL,
# `to_date` date NOT NULL,
# PRIMARY KEY ( `emp_no` )
# ) ENGINE = INNODB DEFAULT CHARSET = utf8mb4;
# 一次只能插入一条数据,
# 如果想要插入多条数据, 需要采用 executemany 配合 insert into sql1_employees values (xxx), (xxx), (xxx);
cursor = connections['default'].cursor()
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,60117,'1986-06-26','1987-06-26');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,62102,'1987-06-26','1988-06-25');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,66074,'1988-06-25','1989-06-25');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,66596,'1989-06-25','1990-06-25');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,66961,'1990-06-25','1991-06-25');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,71046,'1991-06-25','1992-06-24');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,74333,'1992-06-24','1993-06-24');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,75286,'1993-06-24','1994-06-24');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,75994,'1994-06-24','1995-06-24');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,76884,'1995-06-24','1996-06-23');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,80013,'1996-06-23','1997-06-23');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,81025,'1997-06-23','1998-06-23');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,81097,'1998-06-23','1999-06-23');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,84917,'1999-06-23','2000-06-22');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,85112,'2000-06-22','2001-06-22');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,85097,'2001-06-22','2002-06-22');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10001,88958,'2002-06-22','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10002,72527,'1996-08-03','1997-08-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10002,72527,'1997-08-03','1998-08-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10002,72527,'1998-08-03','1999-08-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10002,72527,'1999-08-03','2000-08-02');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10002,72527,'2000-08-02','2001-08-02');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10002,72527,'2001-08-02','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10003,40006,'1995-12-03','1996-12-02');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10003,43616,'1996-12-02','1997-12-02');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10003,43466,'1997-12-02','1998-12-02');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10003,43636,'1998-12-02','1999-12-02');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10003,43478,'1999-12-02','2000-12-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10003,43699,'2000-12-01','2001-12-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10003,43311,'2001-12-01','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,40054,'1986-12-01','1987-12-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,42283,'1987-12-01','1988-11-30');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,42542,'1988-11-30','1989-11-30');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,46065,'1989-11-30','1990-11-30');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,48271,'1990-11-30','1991-11-30');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,50594,'1991-11-30','1992-11-29');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,52119,'1992-11-29','1993-11-29');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,54693,'1993-11-29','1994-11-29');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,58326,'1994-11-29','1995-11-29');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,60770,'1995-11-29','1996-11-28');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,62566,'1996-11-28','1997-11-28');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,64340,'1997-11-28','1998-11-28');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,67096,'1998-11-28','1999-11-28');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,69722,'1999-11-28','2000-11-27');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,70698,'2000-11-27','2001-11-27');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10004,74057,'2001-11-27','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,78228,'1989-09-12','1990-09-12');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,82621,'1990-09-12','1991-09-12');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,83735,'1991-09-12','1992-09-11');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,85572,'1992-09-11','1993-09-11');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,85076,'1993-09-11','1994-09-11');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,86050,'1994-09-11','1995-09-11');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,88448,'1995-09-11','1996-09-10');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,88063,'1996-09-10','1997-09-10');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,89724,'1997-09-10','1998-09-10');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,90392,'1998-09-10','1999-09-10');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,90531,'1999-09-10','2000-09-09');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,91453,'2000-09-09','2001-09-09');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10005,94692,'2001-09-09','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1990-08-05','1991-08-05');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1991-08-05','1992-08-04');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1992-08-04','1993-08-04');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1993-08-04','1994-08-04');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1994-08-04','1995-08-04');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1995-08-04','1996-08-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1996-08-03','1997-08-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1997-08-03','1998-08-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1998-08-03','1999-08-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1999-08-03','2000-08-02');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'2000-08-02','2001-08-02');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'2001-08-02','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,56724,'1989-02-10','1990-02-10');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,60740,'1990-02-10','1991-02-10');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,62745,'1991-02-10','1992-02-10');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,63475,'1992-02-10','1993-02-09');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,63208,'1993-02-09','1994-02-09');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,64563,'1994-02-09','1995-02-09');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,68833,'1995-02-09','1996-02-09');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,70220,'1996-02-09','1997-02-08');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,73362,'1997-02-08','1998-02-08');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,75582,'1998-02-08','1999-02-08');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,79513,'1999-02-08','2000-02-08');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,80083,'2000-02-08','2001-02-07');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,84456,'2001-02-07','2002-02-07');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10007,88070,'2002-02-07','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10008,46671,'1998-03-11','1999-03-11');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10008,48584,'1999-03-11','2000-03-10');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10008,52668,'2000-03-10','2000-07-31');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,60929,'1985-02-18','1986-02-18');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,64604,'1986-02-18','1987-02-18');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,64780,'1987-02-18','1988-02-18');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,66302,'1988-02-18','1989-02-17');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,69042,'1989-02-17','1990-02-17');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,70889,'1990-02-17','1991-02-17');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,71434,'1991-02-17','1992-02-17');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,74612,'1992-02-17','1993-02-16');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,76518,'1993-02-16','1994-02-16');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,78335,'1994-02-16','1995-02-16');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,80944,'1995-02-16','1996-02-16');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,82507,'1996-02-16','1997-02-15');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,85875,'1997-02-15','1998-02-15');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,89324,'1998-02-15','1999-02-15');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,90668,'1999-02-15','2000-02-15');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,93507,'2000-02-15','2001-02-14');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,94443,'2001-02-14','2002-02-14');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10009,95409,'2002-02-14','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10010,94409,'1996-11-24','1997-11-24');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10010,94409,'1997-11-24','1998-11-24');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10010,94409,'1998-11-24','1999-11-24');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10010,94409,'1999-11-24','2000-11-23');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10010,94409,'2000-11-23','2001-11-23');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10010,94409,'2001-11-23','9999-01-01');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10002,72527,'1985-11-21','1996-08-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10003,15828,'1986-08-28','1995-12-03');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1989-06-02','1990-08-05');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10006,43311,'1994-09-15','1998-03-11');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10010,94409,'1989-08-24','1996-11-24');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10008,25828,'1994-09-15','1998-03-11');""")
cursor.execute("""INSERT INTO sql7_salaries (emp_no, salary, from_date, to_date) VALUES(10011,25828,'1990-01-22','9999-01-01');""")
def clear_data(self):
cursor = connections['default'].cursor()
cursor.execute('delete from sql7_salaries;')
def pre_assert(self, qs):
# 断言:
# 10001|17
# 10004|16
# 10009|18
self.assertEqual(len(qs), 3)
self.assertEqual(qs[0].get('emp_no'), 10001)
self.assertEqual(qs[0].get('t'), 17)
self.assertEqual(qs[1].get('emp_no'), 10004)
self.assertEqual(qs[1].get('t'), 16)
self.assertEqual(qs[2].get('emp_no'), 10009)
self.assertEqual(qs[2].get('t'), 18)
def test_sql_7_1(self):
# 准备数据
self.prepare_data()
# 期望SQL
# select emp_no, count(to_date) as t
# from salaries
# group by emp_no
# having t > 4;
#
# 生成SQL
# SELECT `SQL7_salaries`.`emp_no`,
# COUNT(`SQL7_salaries`.`emp_no`) AS `t`
# FROM `SQL7_salaries`
# GROUP BY `SQL7_salaries`.`emp_no`
# HAVING COUNT(`SQL7_salaries`.`emp_no`) > 15
# ORDER BY NULL
qs = (salaries.objects.values('emp_no')
.annotate(t=Count('emp_no'))
.filter(t__gt=15))
# 断言
self.pre_assert(qs)
# 清空
self.clear_data()
| 99.712766
| 139
| 0.672357
| 2,872
| 18,746
| 4.209958
| 0.083565
| 0.054586
| 0.153833
| 0.174345
| 0.785295
| 0.771235
| 0.765197
| 0.765197
| 0.765197
| 0.765197
| 0
| 0.202742
| 0.136189
| 18,746
| 187
| 140
| 100.245989
| 0.543939
| 0.036594
| 0
| 0.013699
| 0
| 0.815068
| 0.723541
| 0.303639
| 0
| 0
| 0
| 0
| 0.061644
| 1
| 0.027397
| false
| 0
| 0.034247
| 0
| 0.075342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f97933cfd8bb1304e0c59cca3c30534250e201e8
| 1,892
|
py
|
Python
|
instrumentation/opentelemetry-instrumentation-django/tests/views.py
|
epsagon/opentelemetry-python-contrib
|
2671ff53c8643ad55dcf78dad072f2f0b82e84e1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2019-11-26T14:31:09.000Z
|
2020-01-09T23:04:49.000Z
|
instrumentation/opentelemetry-instrumentation-django/tests/views.py
|
epsagon/opentelemetry-python-contrib
|
2671ff53c8643ad55dcf78dad072f2f0b82e84e1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 16
|
2020-02-07T10:01:02.000Z
|
2020-04-06T22:03:31.000Z
|
instrumentation/opentelemetry-instrumentation-django/tests/views.py
|
epsagon/opentelemetry-python-contrib
|
2671ff53c8643ad55dcf78dad072f2f0b82e84e1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 5
|
2020-02-05T14:59:12.000Z
|
2020-04-03T15:34:16.000Z
|
from django.http import HttpResponse
def traced(request): # pylint: disable=unused-argument
return HttpResponse()
def traced_template(request, year): # pylint: disable=unused-argument
return HttpResponse()
def error(request): # pylint: disable=unused-argument
raise ValueError("error")
def excluded(request): # pylint: disable=unused-argument
return HttpResponse()
def excluded_noarg(request): # pylint: disable=unused-argument
return HttpResponse()
def excluded_noarg2(request): # pylint: disable=unused-argument
return HttpResponse()
def route_span_name(
request, *args, **kwargs
): # pylint: disable=unused-argument
return HttpResponse()
def response_with_custom_header(request):
response = HttpResponse()
response["custom-test-header-1"] = "test-header-value-1"
response["custom-test-header-2"] = "test-header-value-2"
return response
async def async_traced(request): # pylint: disable=unused-argument
return HttpResponse()
async def async_traced_template(
request, year
): # pylint: disable=unused-argument
return HttpResponse()
async def async_error(request): # pylint: disable=unused-argument
raise ValueError("error")
async def async_excluded(request): # pylint: disable=unused-argument
return HttpResponse()
async def async_excluded_noarg(request): # pylint: disable=unused-argument
return HttpResponse()
async def async_excluded_noarg2(request): # pylint: disable=unused-argument
return HttpResponse()
async def async_route_span_name(
request, *args, **kwargs
): # pylint: disable=unused-argument
return HttpResponse()
async def async_with_custom_header(request):
response = HttpResponse()
response.headers["custom-test-header-1"] = "test-header-value-1"
response.headers["custom-test-header-2"] = "test-header-value-2"
return response
| 24.25641
| 76
| 0.734144
| 222
| 1,892
| 6.153153
| 0.162162
| 0.133236
| 0.194729
| 0.27672
| 0.947291
| 0.937042
| 0.937042
| 0.85798
| 0.795022
| 0.483163
| 0
| 0.006258
| 0.155391
| 1,892
| 77
| 77
| 24.571429
| 0.848561
| 0.236258
| 0
| 0.511111
| 0
| 0
| 0.116084
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.177778
| false
| 0
| 0.022222
| 0.133333
| 0.511111
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
f9a2bae27f884e0eea1d014f0f9474353748823b
| 12,025
|
py
|
Python
|
tests/integration/pypyr/pipelinerunner_int_test.py
|
Reskov/pypyr
|
67bc1795493c19e648e12f776a644f92e3bd2fc8
|
[
"Apache-2.0"
] | 261
|
2020-08-18T19:31:29.000Z
|
2022-03-31T14:54:06.000Z
|
tests/integration/pypyr/pipelinerunner_int_test.py
|
Reskov/pypyr
|
67bc1795493c19e648e12f776a644f92e3bd2fc8
|
[
"Apache-2.0"
] | 73
|
2020-08-14T20:21:14.000Z
|
2022-03-14T14:00:16.000Z
|
tests/integration/pypyr/pipelinerunner_int_test.py
|
Reskov/pypyr
|
67bc1795493c19e648e12f776a644f92e3bd2fc8
|
[
"Apache-2.0"
] | 15
|
2020-09-30T12:15:50.000Z
|
2022-03-30T07:25:40.000Z
|
"""pipelinerunner.py integration tests."""
import logging
from pathlib import Path
import pytest
from unittest.mock import call
from pypyr import pipelinerunner
from pypyr.cache import pipelinecache
from pypyr.errors import KeyNotInContextError
from tests.common.utils import patch_logger
working_dir_tests = Path(Path.cwd(), 'tests')
@pytest.fixture
def pipeline_cache_reset():
    """Clear the pipeline cache before and after each test that requests it."""
    pipelinecache.pipeline_cache.clear()
    try:
        yield
    finally:
        pipelinecache.pipeline_cache.clear()
# region smoke
def test_pipeline_runner_main(pipeline_cache_reset):
    """Smoke-test the pipeline runner's main entry point.

    Strictly speaking an integration test rather than a unit test.
    """
    pipelinerunner.main(
        pipeline_name='smoke',
        pipeline_context_input=None,
        working_dir=working_dir_tests)
# endregion smoke
# region main
def test_pipeline_runner_main_all(pipeline_cache_reset):
    """Run main with every argument supplied; all groups succeed."""
    expected = ['sg1', 'sg1.2', 'success_handler']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        pipelinerunner.main(
            pipeline_name='pipelines/api/main-all',
            pipeline_context_input=['A', 'B', 'C'],
            working_dir=working_dir_tests,
            groups=['sg1'],
            success_group='sh',
            failure_group='fh',
            loader='arbpack.naivefileloader')

    assert log.mock_calls == list(map(call, expected))
def test_pipeline_runner_main_all_with_failure(pipeline_cache_reset):
    """Run main with every argument supplied; success handler raises."""
    expected = ['sg2', 'success_handler', 'fh']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        with pytest.raises(ValueError) as err:
            pipelinerunner.main(
                pipeline_name='pipelines/api/main-all',
                pipeline_context_input=['A', 'B', 'C', 'raise on sh'],
                working_dir=working_dir_tests,
                groups=['sg2'],
                success_group='sh',
                failure_group='fh',
                loader='arbpack.naivefileloader')

    assert str(err.value) == "err from sh"
    assert log.mock_calls == list(map(call, expected))
def test_pipeline_runner_main_minimal():
    """Run main with only the pipeline name."""
    expected = ['steps', 'argList==None', 'on_success']
    # working_dir will default to repo root rather than test root
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        pipelinerunner.main('tests/pipelines/api/main-all')

    assert log.mock_calls == list(map(call, expected))
def test_pipeline_runner_main_with_failure():
    """Run main where the selected group raises and 'fh' handles failure."""
    expected = ['sg3', 'fh']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        with pytest.raises(ValueError) as err:
            pipelinerunner.main(
                pipeline_name='tests/pipelines/api/main-all',
                groups=['sg3'],
                failure_group='fh')

    assert str(err.value) == "err from sg3"
    assert log.mock_calls == list(map(call, expected))
def test_pipeline_runner_main_minimal_with_failure_handled():
    """Run main minimally where on_failure swallows the success-path error."""
    expected = ['steps', 'on_success', 'on_failure']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        pipelinerunner.main(
            pipeline_name='tests/pipelines/api/main-all',
            pipeline_context_input=['A', 'B', 'C', 'raise on success'])

    assert log.mock_calls == list(map(call, expected))
def test_pipeline_runner_main_with_failure_handled():
    """Run main where the on_failure group handles the raised error."""
    expected = ['sg3', 'on_failure']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        pipelinerunner.main(
            pipeline_name='tests/pipelines/api/main-all',
            groups=['sg3'],
            failure_group='on_failure')

    assert log.mock_calls == list(map(call, expected))
# endregion main
# region main_with_context
def test_pipeline_runner_main_with_context_all(pipeline_cache_reset):
    """Run main_with_context with every argument supplied."""
    expected = ['sg1', 'sg1.2', 'success_handler']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        out = pipelinerunner.main_with_context(
            pipeline_name='pipelines/api/main-all',
            dict_in={'argList': ['A', 'B', 'C']},
            working_dir=working_dir_tests,
            groups=['sg1'],
            success_group='sh',
            failure_group='fh',
            loader='arbpack.naivefileloader')

    assert log.mock_calls == list(map(call, expected))
    assert out.pipeline_name == 'pipelines/api/main-all'
    assert out.working_dir == working_dir_tests
    assert out == {'argList': ['A', 'B', 'C'], 'set_in_pipe': 123}
def test_pipeline_runner_main_with_context_all_with_failure(
        pipeline_cache_reset):
    """Run main_with_context with all arguments; success handler raises."""
    expected = ['sg2', 'success_handler', 'fh']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        with pytest.raises(ValueError) as err:
            pipelinerunner.main_with_context(
                pipeline_name='pipelines/api/main-all',
                dict_in={'argList': ['A', 'B', 'C', 'raise on sh']},
                working_dir=working_dir_tests,
                groups=['sg2'],
                success_group='sh',
                failure_group='fh',
                loader='arbpack.naivefileloader')

    assert str(err.value) == "err from sh"
    assert log.mock_calls == list(map(call, expected))
def test_pipeline_runner_main_with_context_minimal():
    """Run main_with_context with only the pipeline name."""
    # Not having argList==None proves context_parser didn't run.
    expected = ['steps', 'argList not exist', 'on_success']
    # working_dir will default to repo root rather than test root
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        out = pipelinerunner.main_with_context('tests/pipelines/api/main-all')

    assert log.mock_calls == list(map(call, expected))
    assert out.pipeline_name == 'tests/pipelines/api/main-all'
    assert out.working_dir == Path.cwd()
    assert out == {'set_in_pipe': 456}
    # somewhat arbitrary check if behaves like Context()
    out.assert_key_has_value('set_in_pipe', 'caller')
def test_pipeline_runner_main_with_context_with_failure():
    """Run main_with_context where the group raises and 'fh' handles it."""
    expected = ['sg3', 'fh']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        with pytest.raises(ValueError) as err:
            pipelinerunner.main_with_context(
                pipeline_name='tests/pipelines/api/main-all',
                groups=['sg3'],
                failure_group='fh')

    assert str(err.value) == "err from sg3"
    assert log.mock_calls == list(map(call, expected))
def test_pipeline_runner_main_with_context_relative_working_dir(
        pipeline_cache_reset):
    """Run main_with_context with a relative working directory."""
    expected = ['steps', 'on_success', 'on_failure']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        out = pipelinerunner.main_with_context(
            pipeline_name='api/main-all',
            dict_in={'argList': ['A', 'B', 'C', 'raise on success']},
            working_dir='tests/pipelines/')

    assert log.mock_calls == list(map(call, expected))
    assert out.pipeline_name == 'api/main-all'
    assert out.working_dir == Path('tests/pipelines/')
    assert len(out) == 4
    assert out['argList'] == ['A', 'B', 'C', 'raise on success']
    assert out['set_in_pipe'] == 456
    assert out['py'] == "raise ValueError('err from on_success')"
    assert len(out['runErrors']) == 1

    run_error = out['runErrors'][0]
    assert run_error
    assert run_error['col'] == 5
    assert run_error['customError'] == {}
    assert run_error['description'] == 'err from on_success'
    assert repr(run_error['exception']) == repr(
        ValueError('err from on_success'))
    assert run_error['line'] == 74
    assert run_error['name'] == 'ValueError'
    assert run_error['step'] == 'pypyr.steps.py'
    assert run_error['swallowed'] is False
    # somewhat arbitrary check if behaves like Context()
    out.assert_key_has_value('set_in_pipe', 'caller')
def test_pipeline_runner_main_with_context_minimal_with_failure_handled():
    """Run main_with_context minimally where on_failure handles the error."""
    expected = ['steps', 'on_success', 'on_failure']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        out = pipelinerunner.main_with_context(
            pipeline_name='tests/pipelines/api/main-all',
            dict_in={'argList': ['A', 'B', 'C', 'raise on success']})

    assert log.mock_calls == list(map(call, expected))
    assert out.pipeline_name == 'tests/pipelines/api/main-all'
    assert out.working_dir == Path.cwd()
    assert len(out) == 4
    assert out['argList'] == ['A', 'B', 'C', 'raise on success']
    assert out['set_in_pipe'] == 456
    assert out['py'] == "raise ValueError('err from on_success')"
    assert len(out['runErrors']) == 1

    run_error = out['runErrors'][0]
    assert run_error
    assert run_error['col'] == 5
    assert run_error['customError'] == {}
    assert run_error['description'] == 'err from on_success'
    assert repr(run_error['exception']) == repr(
        ValueError('err from on_success'))
    assert run_error['line'] == 74
    assert run_error['name'] == 'ValueError'
    assert run_error['step'] == 'pypyr.steps.py'
    assert run_error['swallowed'] is False
    # somewhat arbitrary check if behaves like Context()
    out.assert_key_has_value('set_in_pipe', 'caller')
def test_pipeline_runner_main_with_context_with_failure_handled():
    """Run main_with_context where the on_failure group handles the error."""
    expected = ['sg3', 'on_failure']
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as log:
        out = pipelinerunner.main_with_context(
            pipeline_name='tests/pipelines/api/main-all',
            groups=['sg3'],
            failure_group='on_failure')

    assert log.mock_calls == list(map(call, expected))
    assert out.pipeline_name == 'tests/pipelines/api/main-all'
    assert out.working_dir == Path.cwd()
    assert len(out) == 2
    assert out['py'] == "raise ValueError('err from sg3')"
    assert len(out['runErrors']) == 1

    run_error = out['runErrors'][0]
    assert run_error
    assert run_error['col'] == 5
    assert run_error['customError'] == {}
    assert run_error['description'] == 'err from sg3'
    assert repr(run_error['exception']) == repr(ValueError('err from sg3'))
    assert run_error['line'] == 50
    assert run_error['name'] == 'ValueError'
    assert run_error['step'] == 'pypyr.steps.py'
    assert run_error['swallowed'] is False
    # somewhat arbitrary check if behaves like Context()
    with pytest.raises(KeyNotInContextError) as err:
        out.assert_key_has_value('set_in_pipe', 'arbcaller')
    assert str(err.value) == ("context['set_in_pipe'] doesn't exist. It must "
                              "exist for arbcaller.")
# endregion main_with_context
| 40.762712
| 79
| 0.673181
| 1,585
| 12,025
| 4.850473
| 0.095899
| 0.050338
| 0.042924
| 0.05307
| 0.877081
| 0.852367
| 0.834807
| 0.823621
| 0.789152
| 0.781738
| 0
| 0.005749
| 0.204407
| 12,025
| 294
| 80
| 40.901361
| 0.797847
| 0.114678
| 0
| 0.714286
| 0
| 0
| 0.191578
| 0.050455
| 0
| 0
| 0
| 0
| 0.364532
| 1
| 0.073892
| false
| 0
| 0.039409
| 0
| 0.1133
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ddcabf144da05cb2612bc40f46a6db5b0fce1190
| 146
|
py
|
Python
|
mysite/myapp/views.py
|
KlimDos/jango
|
780d4b2460d1893440922a37c098c705a89c9393
|
[
"MIT"
] | null | null | null |
mysite/myapp/views.py
|
KlimDos/jango
|
780d4b2460d1893440922a37c098c705a89c9393
|
[
"MIT"
] | 6
|
2020-02-12T02:36:56.000Z
|
2022-02-10T10:46:51.000Z
|
mysite/myapp/views.py
|
KlimDos/Django
|
780d4b2460d1893440922a37c098c705a89c9393
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, render_to_response
# Create your views here.
def index(request):
    """Render the landing page.

    Uses django.shortcuts.render (already imported at the top of this
    module) instead of render_to_response, which was deprecated in
    Django 2.0 and removed in Django 3.0. Both return an HttpResponse
    rendering 'index.html'; render additionally passes the request to
    the template context, the behavior modern Django expects.
    """
    return render(request, 'index.html')
| 29.2
| 55
| 0.794521
| 21
| 146
| 5.333333
| 0.761905
| 0.142857
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123288
| 146
| 5
| 56
| 29.2
| 0.875
| 0.157534
| 0
| 0
| 0
| 0
| 0.081967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
ddd78f21d3ea53f0cc2ae1356f909d222916cbfb
| 48
|
py
|
Python
|
AccountsApp/models/__init__.py
|
Kolynes/uzu-accounts-app
|
21c182ec8497fe4fa1ca651fb6c622b59579aba2
|
[
"MIT"
] | null | null | null |
AccountsApp/models/__init__.py
|
Kolynes/uzu-accounts-app
|
21c182ec8497fe4fa1ca651fb6c622b59579aba2
|
[
"MIT"
] | null | null | null |
AccountsApp/models/__init__.py
|
Kolynes/uzu-accounts-app
|
21c182ec8497fe4fa1ca651fb6c622b59579aba2
|
[
"MIT"
] | 1
|
2020-10-28T12:32:28.000Z
|
2020-10-28T12:32:28.000Z
|
from .VerificationModel import VerificationModel
| 48
| 48
| 0.916667
| 4
| 48
| 11
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 48
| 1
| 48
| 48
| 0.977778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fd25530a1870eb066e920d9ef241510843534f48
| 167
|
py
|
Python
|
python/testData/inspections/PyUnresolvedReferencesInspection/instanceAttributeCreatedInsideWithStatement.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection/instanceAttributeCreatedInsideWithStatement.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyUnresolvedReferencesInspection/instanceAttributeCreatedInsideWithStatement.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
class Foo(object):
    # NOTE(review): by its file path this is IDE-inspection fixture data
    # (PyUnresolvedReferencesInspection/instanceAttributeCreatedInsideWithStatement);
    # the code shape is intentional — do not "clean it up".
    def __init__(self):
        # Attribute deliberately created inside a `with` block.
        with open('b.py'):
            self.scope = "a"
            pass

    def get_scope(self):
        # Reads the attribute assigned inside the with-statement above.
        return self.scope
| 20.875
| 28
| 0.508982
| 21
| 167
| 3.809524
| 0.714286
| 0.225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.371257
| 167
| 8
| 29
| 20.875
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0.029762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
fd3c8075587387b9be51512c95ead9fae4f6d5b9
| 3,987
|
py
|
Python
|
src/main/python/tmp.py
|
macdaliot/epic
|
784fc2c2b3f730a6417d0f25587208c8b44a8d2c
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/tmp.py
|
macdaliot/epic
|
784fc2c2b3f730a6417d0f25587208c8b44a8d2c
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/tmp.py
|
macdaliot/epic
|
784fc2c2b3f730a6417d0f25587208c8b44a8d2c
|
[
"Apache-2.0"
] | null | null | null |
#import pymongo
import sys
import os
#from pymongo import MongoClient
from moveBatch import moveBatch
# Fixed batch of input values for moveBatch; the literal is preserved
# verbatim from the original, only reformatted across lines for readability.
MOVE_BATCH_VALUES = [
    0.6878930867438103, 0.8601166855980099, 0.787110182202453,
    0.7390269160608786, 0.4253127065169371, 0.19227850181195683,
    0.8668621382475246, 0.35351966164249027, 0.6386985808256926,
    0.6961745812829627, 0.8774666649623053, 0.4578269074953959,
    0.8469025155592574, 0.2295233666392943, 0.41475043255346455,
    0.41452301691588866, 0.9497644223588801, 0.39859404799206166,
    0.13007450205946403, 0.48281395251028814, 0.9496599275801957,
    0.2040560147553544, 0.7029078925620592, 0.06184296351755403,
    0.1565882267348142, 0.5294230411776896, 0.38220937324012194,
    0.3015317607608988, 0.4537164521068271, 0.4724982617814867,
    0.8925551704146, 0.9886789233263202, 0.4039591154143697,
    0.2922422555297888, 0.1356173571982966, 0.9438235658523189,
    0.05540467637952862, 0.2221123119160474, 0.9003225761119498,
    0.24912815450192127, 0.9556365416567435, 0.18176558094533213,
    0.8906165326308739, 0.6465701101386941, 0.38211392936547794,
    0.8279822916756414, 0.2703638985013399, 0.23094815686786752,
    0.2765272477842563, 0.4324427465750619, 0.1696412504345004,
    0.5674710295277494, 0.7945845587196589, 0.19461058730627012,
    0.7405462126924823, 0.15871291164096235, 0.6207613741335313,
    0.16833041763943268, 0.7881315725212222, 0.28725661402755254,
    0.6543149417352083, 0.6836575372026573, 0.8714872706304676,
    0.677748952540966, 0.1528712772351838, 0.31981325637237124,
    0.7487649783908923, 0.8534535249258528, 0.24389657785333252,
    0.19622737654502576, 0.709200610693595, 0.5129657253912986,
    0.7358393006568441, 0.902691869280299, 0.4075539022482628,
    0.8098961407338221, 0.22890935000728851, 0.191141026428451,
    0.08033917758652642, 0.36512765358830224, 0.9324758371446565,
    0.9405955950228375, 0.6083175551642721, 0.13680430525851628,
    0.2642989194096761, 0.8413291711132264, 0.5400296805335848,
    0.5566128382000908, 0.3708435596891778, 0.7751094641964554,
    0.6579531816031864, 0.8296211551506667, 0.2868364118982625,
    0.5626365713807969, 0.7697021693686322, 0.666287113262972,
    0.5832405022822058, 0.6002111690927473, 0.11472130583094675,
    0.7615779588736598, 0.7830255549096513, 0.916417296698604,
    0.32159597629887116, 0.6765726374345089, 0.5772114983356831,
    0.47681882614089943, 0.9264930523736195, 0.566208452868077,
    0.4667494142412266, 0.139391452615231, 0.07189142097744938,
    0.6492819332813303, 0.4972447403995335, 0.7869270544098179,
    0.05408728741769475, 0.35523232435740515, 0.7013656567055466,
    0.5157757873365, 0.7870328980735396, 0.6482524998144824,
    0.6667935289437594, 0.7965621058550655, 0.6042762354506398,
    0.7480946274922586, 0.975834116649035, 0.1899830664860146,
    0.8200120168052442, 0.7414568947535527, 0.10911226342769342,
    0.09919903102913574, 0.39165003727739855, 0.6884956185233906,
    0.5817345658986113, 0.18723401007559304, 0.5844591123187897,
    0.5965772540461634, 0.46181473467769873, 0.11291759177681227,
    0.682533057379911, 0.06884541672235678, 0.9702501059927093,
    0.6310251259212536, 0.1319748896220213, 0.8508562491556401,
    0.3216499607305021, 0.9181441506597288, 0.32992469933727,
    0.8654810910225812, 0.8581526259126884, 0.5356736349201504,
    0.3397845609472452, 0.9883116750500982, 0.32962594602393636,
    0.08967357924851826, 0.42087423431018656, 0.5533549437424008,
    0.8398720250775282, 0.20812548169721068, 0.7989168190675126,
    0.7592089360141622, 0.33764711767198974, 0.4826654069614792,
    0.4322261356289877, 0.6624222716602849, 0.7403835221751166,
    0.5939987054697939, 0.06019047072000405, 0.23650752649871376,
    0.7874968334944554, 0.5910603288078299, 0.562674507400604,
    0.5159424490225444, 0.3047250729319375, 0.36124180192098765,
    0.8077011124748155, 0.22753576546947296, 0.8591339311081287,
    0.3895936175195155, 0.9221465060738222, 0.17632535790779957,
    0.8147650317942514, 0.4291205417237758, 0.4055167577107477,
    0.315268101557423, 0.43877513036994187, 0.7897177479621977,
    0.6968466722561508, 0.26272113743515746, 0.5927763317756529,
    0.26404123540084623, 0.8810324157243855, 0.3511261474462716,
    0.34672438698974906, 0.8810246816191002, 0.46156377995431075,
    0.20644917033210497, 0.10293974546200002, 0.08488803093858077,
    0.6385899042307392, 0.1964058938095774,
]

rString = moveBatch(MOVE_BATCH_VALUES, 0.0)
# print() with a single argument behaves identically on Python 2 and 3;
# the original bare `print str(...)` statement is Python-2-only syntax.
print(str(rString))
| 498.375
| 3,861
| 0.891397
| 421
| 3,987
| 8.441805
| 0.501188
| 0.007316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.867053
| 0.005769
| 3,987
| 7
| 3,862
| 569.571429
| 0.029516
| 0.011538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.6
| null | null | 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b5be404cba68c3c0bc4da0f507fe96d1d8380661
| 295
|
py
|
Python
|
src/message.py
|
shimech/snkrs-pass-checker
|
178ce65815adf5a342fff57a464b3b6073e53227
|
[
"MIT"
] | 2
|
2021-03-04T04:44:54.000Z
|
2021-03-22T14:53:06.000Z
|
src/message.py
|
shimech/snkrs-pass-checker
|
178ce65815adf5a342fff57a464b3b6073e53227
|
[
"MIT"
] | null | null | null |
src/message.py
|
shimech/snkrs-pass-checker
|
178ce65815adf5a342fff57a464b3b6073e53227
|
[
"MIT"
] | null | null | null |
class Message:
    """Builds the Slack notification text for a freshly issued SNKRS PASS."""

    # Three-line announcement template; the "{}" placeholder receives
    # the product URL in make_message.
    snkrs_pass_message = (
        "<!channel> 【SNKRS PASS Flying Get!!!】" + "\n"
        + "SNKRS PASSが発行されました!急げ!!:snkrspass:" + "\n"
        + "{}" + "\n"
    )

    @classmethod
    def make_message(cls, url):
        """Return the notification text with *url* substituted in."""
        return cls.snkrs_pass_message.format(url)
| 32.777778
| 71
| 0.644068
| 36
| 295
| 5.027778
| 0.5
| 0.248619
| 0.353591
| 0.187845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19322
| 295
| 8
| 72
| 36.875
| 0.760504
| 0
| 0
| 0
| 0
| 0
| 0.267797
| 0.094915
| 0.428571
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.571429
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
b5e8b40e2db707816ba3e18d9aa38586cbef9382
| 10,034
|
py
|
Python
|
app/tests/v1/test_sales.py
|
kwanj-k/storemanager-API
|
e51511545a717341a7b1eb100eb3eab625a8b011
|
[
"MIT"
] | 1
|
2019-05-08T08:39:08.000Z
|
2019-05-08T08:39:08.000Z
|
app/tests/v1/test_sales.py
|
kwanj-k/storemanager-API
|
e51511545a717341a7b1eb100eb3eab625a8b011
|
[
"MIT"
] | 2
|
2019-10-21T17:56:01.000Z
|
2019-10-29T07:36:39.000Z
|
app/tests/v1/test_sales.py
|
kwanj-k/storemanager-API
|
e51511545a717341a7b1eb100eb3eab625a8b011
|
[
"MIT"
] | null | null | null |
"""
A module to contain all sale related test cases
"""
# Standard library imports
import json
# Local application imports
from .base_config import Settings
from app.api.v1.models.db import Db
s_url = "/api/v1/sales"
p_url = "/api/v1/products"
class TestSales(Settings):
"""
p_data to contain product data
"""
p_data = {
"name": "monster",
"inventory": 24,
"price": 165
}
s_data = {
"number": 3
}
m_data = {
"number": 387
}
ns_data = {
"number": 12
}
str_data = {
"number": 'tr'
}
unwanted_data = {
"number": 12,
"yes": "yes"
}
no_data = {
}
def test_make_sale(self):
"""Test for the make sale endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
res = self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.s_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
res1 = json.loads(res.data.decode())
self.assertEqual(res1['status'],'Success!')
self.assertEqual(res.status_code, 201)
def test_make_sale_with_str_number(self):
"""Test for the make sale endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
res = self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.str_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
res1 = json.loads(res.data.decode())
self.assertEqual(res1['message'],'Name of the product can not be an integer')
self.assertEqual(res.status_code, 406)
def test_selling_non_existing_product(self):
"""Test for the make sale endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
res = self.app.post("/api/v1/products/1",
data=json.dumps(self.s_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
res1 = json.loads(res.data.decode())
self.assertEqual(res1['message'],'Product does not exist')
self.assertEqual(res.status_code, 404)
def test_make_sale_with_morenum_than_available(self):
"""Test for the make sale endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
res = self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.m_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
res1 = json.loads(res.data.decode())
self.assertEqual(res1['message'],'There are only 24 monster available')
self.assertEqual(res.status_code, 400)
def test_make_sale_with_no_num(self):
"""Test for the make sale endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
res = self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.no_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
res1 = json.loads(res.data.decode())
self.assertEqual(res1['message'],'Please provide the number field')
self.assertEqual(res.status_code, 406)
def test_make_sale_with_unwanted_data(self):
"""Test for the make sale endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
res = self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.unwanted_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
res1 = json.loads(res.data.decode())
self.assertEqual(res1['message'],'The field yes is not required')
self.assertEqual(res.status_code, 400)
def test_get_all_sales(self):
"""Test for the get all sales endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.s_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
res = self.app.get(
s_url, headers=dict(
Authorization="Bearer " + token))
res1 = json.loads(res.data.decode())
self.assertEqual(res1['status'],'Success!')
self.assertEqual(res.status_code, 200)
def test_get_sales_with_no_records(self):
"""Test for the get all sales endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
res = self.app.get(
s_url, headers=dict(
Authorization="Bearer " + token))
res1 = json.loads(res.data.decode())
self.assertEqual(res1['message'],'There are no sale records')
self.assertEqual(res.status_code, 404)
def test_get_sale_by_id(self):
"""Test for the get sale by id endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.s_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
s = Db.get_s_by_product('monster')
res = self.app.get("/api/v1/sales/{}".format(s.id),
headers=dict(Authorization="Bearer " + token))
res1 = json.loads(res.data.decode())
self.assertEqual(res1['status'],'Success!')
self.assertEqual(res.status_code, 200)
def test_sale_delete(self):
"""Test for the delete sale by id endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.s_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
s = Db.get_s_by_product('monster')
res = self.app.delete("/api/v1/sales/{}".format(s.id),
headers=dict(Authorization="Bearer " + token))
res1 = json.loads(res.data.decode())
self.assertEqual(res1['status'],'Deleted!')
self.assertEqual(res.status_code, 200)
def test_sale_update(self):
"""Test for the update sale by id endpoint."""
login = self.autheniticate()
token = json.loads(login.data.decode()).get('token')
self.app.post(p_url,
data=json.dumps(self.p_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
p = Db.get_product('monster')
self.app.post("/api/v1/products/{}".format(p.id),
data=json.dumps(self.s_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
s = Db.get_s_by_product('monster')
res = self.app.put("/api/v1/sales/{}".format(s.id),
data=json.dumps(self.ns_data),
headers=dict(Authorization="Bearer " + token),
content_type='application/json')
res1 = json.loads(res.data.decode())
self.assertEqual(res1['status'],'Success!')
self.assertEqual(res.status_code, 200)
| 43.064378
| 85
| 0.549731
| 1,135
| 10,034
| 4.74185
| 0.102203
| 0.031215
| 0.107023
| 0.133779
| 0.86845
| 0.851914
| 0.848384
| 0.845225
| 0.802861
| 0.794314
| 0
| 0.012603
| 0.312039
| 10,034
| 232
| 86
| 43.25
| 0.767058
| 0.052023
| 0
| 0.71066
| 0
| 0
| 0.132634
| 0
| 0
| 0
| 0
| 0
| 0.111675
| 1
| 0.055838
| false
| 0
| 0.015228
| 0
| 0.111675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b5ed8106e50c1cbf1f8a592754786851d36c1e7d
| 8,710
|
py
|
Python
|
SBaaS_quantification/stage01_quantification_QCs_postgresql_models.py
|
dmccloskey/SBaaS_quantification
|
b2a9c7a9a0d318f22ff20e311f94c213852ba914
|
[
"MIT"
] | null | null | null |
SBaaS_quantification/stage01_quantification_QCs_postgresql_models.py
|
dmccloskey/SBaaS_quantification
|
b2a9c7a9a0d318f22ff20e311f94c213852ba914
|
[
"MIT"
] | null | null | null |
SBaaS_quantification/stage01_quantification_QCs_postgresql_models.py
|
dmccloskey/SBaaS_quantification
|
b2a9c7a9a0d318f22ff20e311f94c213852ba914
|
[
"MIT"
] | null | null | null |
from SBaaS_base.postgresql_orm_base import *
class data_stage01_quantification_LLOQAndULOQ(Base):
    """ORM model: per-component LLOQ/ULOQ calibration QC rows.

    NOTE(review): Base, Column, etc. come from the star import of
    SBaaS_base.postgresql_orm_base; json used in __repr__json__ is
    presumably re-exported from there too — confirm.
    """
    __tablename__ = 'data_stage01_quantification_LLOQAndULOQ'
    id = Column(Integer, Sequence('data_stage01_quantification_lloqanduloq_id_seq'), primary_key=True)
    experiment_id = Column(String(50))
    sample_name = Column(String(100))
    component_group_name = Column(String(100))
    component_name = Column(String(500))
    calculated_concentration = Column(Float)
    calculated_concentration_units = Column(String(20))
    correlation = Column(Float)
    lloq = Column(Float);
    uloq = Column(Float);
    points = Column(Float);
    used_ = Column(Boolean);
    # One row per (experiment, sample, component, concentration unit).
    __table_args__ = (UniqueConstraint('experiment_id','sample_name','component_name','calculated_concentration_units'),
        )

    def __init__(self,
                 row_dict_I,
                 ):
        # Populate every mapped column from a dict keyed by column name;
        # missing keys raise KeyError.
        self.experiment_id=row_dict_I['experiment_id'];
        self.used_=row_dict_I['used_'];
        self.points=row_dict_I['points'];
        self.uloq=row_dict_I['uloq'];
        self.lloq=row_dict_I['lloq'];
        self.correlation=row_dict_I['correlation'];
        self.calculated_concentration_units=row_dict_I['calculated_concentration_units'];
        self.calculated_concentration=row_dict_I['calculated_concentration'];
        self.component_name=row_dict_I['component_name'];
        self.component_group_name=row_dict_I['component_group_name'];
        self.sample_name=row_dict_I['sample_name'];

    def __set__row__(self, experiment_id_I, sample_name_I, component_group_name_I, component_name_I,
                     calculated_concentration_I, calculated_concentration_units_I,
                     correlation_I, lloq_I, uloq_I, points_I, used_I):
        # Positional alternative to __init__ for setting every column.
        self.experiment_id = experiment_id_I;
        self.sample_name = sample_name_I;
        self.component_group_name = component_group_name_I;
        self.component_name = component_name_I;
        self.calculated_concentration = calculated_concentration_I;
        self.calculated_concentration_units = calculated_concentration_units_I;
        self.correlation = correlation_I;
        self.lloq = lloq_I;
        self.uloq = uloq_I;
        self.points = points_I;
        self.used_ = used_I;

    def __repr__dict__(self):
        # Plain-dict view of the row, including the generated primary key.
        return {'id':self.id,
                'experiment_id':self.experiment_id,
                'sample_name':self.sample_name,
                'component_group_name':self.component_group_name,
                'component_name':self.component_name,
                'calculated_concentration':self.calculated_concentration,
                'calculated_concentration_units':self.calculated_concentration_units,
                'correlation':self.correlation,
                'lloq':self.lloq,
                'uloq':self.uloq,
                'points':self.points,
                'used_':self.used_,
                }

    def __repr__json__(self):
        # JSON string of the dict view above.
        return json.dumps(self.__repr__dict__())
class data_stage01_quantification_dilutions(Base):
    """Replicate statistics of calculated concentrations per diluted sample.

    One row per (experiment, sample, component, unit), enforced by the
    unique constraint below.
    """
    __tablename__ = 'data_stage01_quantification_dilutions'
    id = Column(Integer, Sequence('data_stage01_quantification_dilutions_id_seq'), primary_key=True)
    experiment_id = Column(String(50))
    sample_id = Column(String(100))
    component_group_name = Column(String(100))
    component_name = Column(String(500))
    n_replicates = Column(Integer)  # number of replicate measurements averaged
    calculated_concentration_average = Column(Float)
    calculated_concentration_cv = Column(Float)  # CV across replicates — presumably percent; verify upstream
    calculated_concentration_units = Column(String(20))
    __table_args__ = (
        UniqueConstraint('experiment_id', 'sample_id', 'component_name',
                         'calculated_concentration_units'),
    )

    def __init__(self,
                 row_dict_I,
                 ):
        """Populate this row from a dict keyed by column name (KeyError if a key is absent)."""
        for column in ('calculated_concentration_average',
                       'calculated_concentration_cv',
                       'calculated_concentration_units',
                       'experiment_id', 'sample_id',
                       'component_group_name', 'component_name',
                       'n_replicates'):
            setattr(self, column, row_dict_I[column])

    def __set__row__(self, experiment_id_I, sample_id_I, component_group_name_I, component_name_I, n_replicates_I,
                     calculated_concentration_average_I, calculated_concentration_cv_I, calculated_concentration_units_I):
        """Assign every column of this row from individual positional values."""
        pairs = zip(('experiment_id', 'sample_id', 'component_group_name',
                     'component_name', 'n_replicates',
                     'calculated_concentration_average',
                     'calculated_concentration_cv',
                     'calculated_concentration_units'),
                    (experiment_id_I, sample_id_I, component_group_name_I,
                     component_name_I, n_replicates_I,
                     calculated_concentration_average_I,
                     calculated_concentration_cv_I,
                     calculated_concentration_units_I))
        for attribute, value in pairs:
            setattr(self, attribute, value)

    def __repr__dict__(self):
        """Return this row as a plain dict keyed by column name."""
        return {attribute: getattr(self, attribute)
                for attribute in ('id', 'experiment_id', 'sample_id',
                                  'component_group_name', 'component_name',
                                  'n_replicates',
                                  'calculated_concentration_average',
                                  'calculated_concentration_cv',
                                  'calculated_concentration_units')}

    def __repr__json__(self):
        """Return this row serialized as a JSON string."""
        return json.dumps(self.__repr__dict__())
class data_stage01_quantification_QCs(Base):
__tablename__ = 'data_stage01_quantification_QCs'
id = Column(Integer, Sequence('data_stage01_quantification_qcs_id_seq'), primary_key=True)
experiment_id = Column(String(50))
sample_name_abbreviation = Column(String(100))
sample_dilution = Column(Float, primary_key=True);
component_group_name = Column(String(100))
component_name = Column(String(500))
n_replicates = Column(Integer)
calculated_concentration_average = Column(Float)
calculated_concentration_CV = Column(Float)
calculated_concentration_units = Column(String(20))
__table_args__ = (UniqueConstraint('experiment_id','sample_name_abbreviation','component_name','calculated_concentration_units'),
)
def __init__(self,
row_dict_I,
):
self.sample_dilution=row_dict_I['sample_dilution'];
self.calculated_concentration_units=row_dict_I['calculated_concentration_units'];
self.calculated_concentration_CV=row_dict_I['calculated_concentration_CV'];
self.calculated_concentration_average=row_dict_I['calculated_concentration_average'];
self.n_replicates=row_dict_I['n_replicates'];
self.component_name=row_dict_I['component_name'];
self.component_group_name=row_dict_I['component_group_name'];
self.sample_name_abbreviation=row_dict_I['sample_name_abbreviation'];
self.experiment_id=row_dict_I['experiment_id'];
def __set__row__(self, experiment_id_I, sample_name_abbreviation_I, sample_dilution_I, component_group_name_I, component_name_I, n_replicates_I,
calculated_concentration_average_I, calculated_concentration_CV_I, calculated_concentration_units_I):
self.experiment_id = experiment_id_I;
self.sample_name_abbreviation = sample_name_abbreviation_I;
self.sample_dilution = sample_dilution_I;
self.component_group_name = component_group_name_I;
self.component_name = component_name_I;
self.n_replicates = n_replicates_I;
self.calculated_concentration_average = calculated_concentration_average_I;
self.calculated_concentration_CV = calculated_concentration_CV_I;
self.calculated_concentration_units = calculated_concentration_units_I;
def __repr__dict__(self):
return {'id':self.id,
'experiment_id':self.experiment_id,
'sample_name_abbreviation':self.sample_name_abbreviation,
'sample_dilution':self.sample_dilution,
'component_group_name':self.component_group_name,
'component_name':self.component_name,
'n_replicates':self.n_replicates,
'calculated_concentration_average':self.calculated_concentration_average,
'calculated_concentration_CV':self.calculated_concentration_CV,
'calculated_concentration_units':self.calculated_concentration_units}
def __repr__json__(self):
    """Return the row serialized as a JSON object string."""
    row = self.__repr__dict__()
    return json.dumps(row)
| 50.346821
| 148
| 0.718944
| 995
| 8,710
| 5.728643
| 0.063317
| 0.270351
| 0.043509
| 0.050526
| 0.850175
| 0.778596
| 0.778596
| 0.73386
| 0.69614
| 0.653333
| 0
| 0.008128
| 0.194834
| 8,710
| 173
| 149
| 50.346821
| 0.804506
| 0
| 0
| 0.531646
| 0
| 0
| 0.163949
| 0.098852
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.006329
| null | null | 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b5ef8d4f808defc512d85cd90ca9fcea50c60f0c
| 36,091
|
py
|
Python
|
Python/windwardrestapi/Model/Document.py
|
windward-studios/Windward-REST-version-2-Clients
|
8fd467e6f4ece6fcc435609ffb23448d07af3131
|
[
"MIT"
] | null | null | null |
Python/windwardrestapi/Model/Document.py
|
windward-studios/Windward-REST-version-2-Clients
|
8fd467e6f4ece6fcc435609ffb23448d07af3131
|
[
"MIT"
] | 1
|
2020-10-12T20:32:05.000Z
|
2020-10-12T20:38:04.000Z
|
Python/windwardrestapi/Model/Document.py
|
windward-studios/Windward-REST-version-2-Clients
|
8fd467e6f4ece6fcc435609ffb23448d07af3131
|
[
"MIT"
] | null | null | null |
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x08\x00\x55\x0d\x0d\x0a\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xf5\x22\x00\x00\x00\x00\x00\x10\xa4\x37\xc4\x0d\x3e\x9e\x01\xef\x93\x59\x47\x94\x74\x15\x68\x81\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x25\xf7\xcc\xa3\x14\x0e\x42\x94\xb3\xab\x3e\x43\x77\x67\x26\x48\x1a\x71\x4b\x64\xa1\x5f\xc4\xdd\x5f\x1d\x1b\x9b\xa9\xfd\x40\x2a\x4a\x8a\xa0\xd5\x36\x1e\x00\x4c\x5f\x3a\x6c\x3e\xb2\xfc\x2a\x68\x0b\xcc\xd9\xb6\xb2\x77\x75\x9c\x67\x9d\xeb\x35\x17\xa9\x79\x84\x3d\x1a\xc6\x4d\xb4\x56\x21\x6d\x1e\xac\xbd\x63\xcb\x04\x36\x51\x30\xce\x4f\x2a\x80\x5d\x8c\x52\x8d\x4a\x17\x05\xa3\xdb\xad\x6a\x9f\xfc\x08\x03\x9d\xfd\x94\x9b\xa5\x21\x16\xb1\x82\x40\x38\x37\xf5\x55\x2a\x85\x44\x96\x8e\x2c\x82\x02\x1a\x72\x4b\x7c\xde\x29\x94\xe6\x17\x74\x5f\x6a\xf0\xa8\xb4\x28\x9f\x7d\x93\x3e\xba\x9e\x31\x49\xfb\xf0\x17\x0e\x2c\x62\xb7\xdd\x90\xc1\x72\x8a\x3b\xad\x22\xb9\x1f\xb7\xde\x5a\x05\x85\xb2\x52\xf6\xc9\xd1\xb6\x59\x47\xf3\x0c\x5f\xaa\x3d\xd6\xe7\x29\xeb\xd0\x68\x73\x6f\x8a\xfb\x4e\x83\x42\x7e\xe6\xde\x17\xab\x95\x79\x75\x7f\x27\x4a\x92\x25\xc7\xd1\x57\x09\x97\xc0\x13\x3b\x1c\xf5\xa7\x5c\x42\x0e\x09\x6d\x41\x4b\x72\x1d\x81\x34\xe5\x58\x92\x1a\xda\xf6\x9c\xa0\x1e\x3e\xa3\x00\xf6\x14\xc3\xbc\x82\x5e\x3c\xb8\xb6\xd7\x0f\xd4\x7c\x5d\xe5\x06\x8e\x5b\x6c\x03\xff\xcc\xbd\xe0\x6c\xf1\xe7\xdc\x2d\xfb\xe0\xa1\x2c\xb7\xff\x03\x1c\x72\x26\x98\x6d\xf4\x99\xa8\xef\x52\x22\xdc\x36\x33\x18\xb6\xa8\x22\x47\xa5\xbf\x80\x65\x0f\x38\x17\xa0\x04\x51\xe8\x8b\xd1\x57\x05\xf7\xa8\xc0\xc4\x6c\xfc\xac\x26\x1f\x4d\x1e\xb6\x98\x3c\x54\x6e\x6f\xfb\x7d\xe1\x88\x81\x76\xb2\x99\xcc\xf6\xa2\x1f\xa7\x71\x56\x57\x56\x2e\x6c\x0c\x10\x49\xcc\x43\xbb\xa8\x29\x23\xf8\x95\xc8\x85\x0e\x1a\x6e\x51\xba\xfd\x28\x13\x62\x1d\x84\x11\xc4\x8a\x6a\xd2\x3a\x2b\xe3\x1b\x55\xc1\xf8\xea\x39\x5e\x53\x27\xaf\x30\x77\x7d\x44\xe6\xa2\x78\xba\x00\x6e\x5c\xba\xc2\x8d\x8f\x6c\xd3\xd4\x58\x15\x2b\xd1\x1d\xcf\xf0\x43\xbe\x4e\x76\xb0\xa5\xd2\x96\x46\xeb\x3a\xef\x
95\x28\x4a\x02\xb8\x27\x48\x4a\xa6\x75\xbc\xc2\x17\x5a\x54\x0e\x5f\x70\xef\x70\xf8\xc1\xb9\x26\x2d\x00\x57\x7f\x5e\xb3\x36\x53\x77\xdd\x70\xd9\x8c\xf5\xc4\x1a\x65\x01\x9a\x44\xc6\x69\x23\x8b\x60\xfd\xfc\x2b\xfd\xd0\xe4\x92\xc0\x7f\x5b\x14\x96\x97\x3e\xb9\x47\xf8\xc8\x79\x01\x2d\xfd\x8d\xc7\x5d\x0e\x3c\x6d\xab\x90\x22\xc4\x70\x81\xed\xbe\x36\xc0\x72\x43\xc4\x8e\xbd\xc9\x26\x5a\xf5\xa1\x6e\x4e\x20\x8c\x77\xeb\xae\xdd\xcd\x3e\x54\xa1\xc3\x86\x05\x4d\xed\x2c\x51\x57\x8a\x5a\xaf\x7a\x00\x3f\x7a\xca\xce\xbf\x2a\x84\x24\xb4\xcf\x79\x2e\xb6\xe8\x0e\xe0\xb8\xb9\x41\x56\xd9\xb5\xb5\x1e\xab\xb1\x44\x82\x7b\x8c\x5e\x15\xcd\xd4\x95\xdf\xd1\x14\x6b\x77\x75\xa4\xd0\xf2\x5f\xf1\xd4\x8b\x48\x04\xfe\x67\x82\xa0\x71\xb9\x88\x63\x36\x55\xa2\xac\x08\x2c\x48\xb5\x98\x29\xdb\x80\x19\x53\x50\x4d\x7e\x6d\x91\x95\xaa\xae\xec\x30\x2a\x85\xb2\xd3\x0b\xeb\x16\x0d\x66\xe9\xd7\xd6\x7e\xd8\xfa\xa3\x0a\x6d\x0e\x1a\x75\xf0\x8f\x5f\x13\xe2\x6d\x86\xb5\x11\xbf\x4d\xc1\x1d\xdc\x1a\x8f\x73\xd5\x7b\x2a\xf0\xe6\x9b\x0e\x29\xfd\x96\x51\x8b\x44\xce\xde\xb6\xcb\x0b\x4a\x9f\x6b\xc9\xb2\xbf\x96\x6a\x66\x09\xb0\x69\x24\x8d\x6c\xad\xc1\xf7\xc6\xff\x9e\xc5\xb7\x8a\xc9\x52\xb8\x9c\xe9\xd3\xdc\xbf\xb0\x6a\xe5\x9d\x27\x7d\xb8\x8c\x04\xfe\x1f\x9d\x12\xb4\xe7\xeb\x7c\x5b\xae\x04\x60\x9a\x7f\xe8\xa4\x0b\xce\x5a\xde\xde\xd0\x0e\xa3\x70\xc2\x99\xf3\xe0\x46\x8d\x2f\x88\xa8\x7c\x24\xe1\x55\x9f\x47\x76\xbc\x60\xe3\xeb\x5d\x20\x34\xb6\x64\xd6\xd4\x51\x51\xca\x2b\xd9\x60\xfd\x77\x3f\xf4\x3b\xf6\x77\x8a\xb0\xe1\x9f\xa4\x63\xb8\x20\xd0\x08\xac\x8e\xf8\xe8\x30\x4e\xf5\xca\x99\x53\x4d\x41\x74\x5b\xbe\x64\xd9\xd2\xf8\xe1\x87\x38\x69\x4e\xa2\xe4\xdb\xdf\x59\x22\x88\x54\xcb\x9a\xd8\x3e\x91\x98\x32\xe9\x9b\x0c\x03\x08\xf0\xa0\x5d\x61\xb9\x4e\xae\x3e\xbf\xa4\x53\xe1\x43\x08\x0f\xd6\x2e\x1e\xde\x87\xe8\x01\xc5\xcf\x36\x29\xc8\xa0\x56\x6e\xdd\x36\xf3\xdb\x72\x66\xdf\x83\x66\x54\x19\x95\x19\x5f\x8c\xad\xcf\x2b\xa9\x76\x18\x37\xb2\x0c\x15\xa0\x5d\x06\xad\x28\xb1\x81\x4f\x2a\xf1\x95\x5b\x6a\xf7\x63\x17\x5a\x3c\xa6\xbc\x96\xd3\x17\x17\x35\x
27\x69\x12\x49\x0e\xf7\x5d\xfc\xc5\x2e\x97\x2f\x91\x06\x56\xc2\xa7\x4d\x49\xef\x78\x26\x51\x51\xa4\xc7\x1e\x8b\x2f\xc9\x83\x56\xd6\x0a\x73\xb4\x2e\xaa\x81\x2b\xc6\xfe\x52\x17\x25\x1f\x0a\xd3\xd0\xd0\xd7\xf2\xe1\xc4\x49\x87\x45\x40\x99\x3a\x68\xc9\xf8\x04\x8d\x57\x21\xce\xa3\x10\xa2\xae\x9a\x04\x0b\x17\x8b\xca\x6f\x02\xac\xda\xd4\x91\x3d\x13\x12\x95\x00\x7d\x58\x2a\x25\xf4\xf8\x60\x47\x84\x41\x6a\x99\xde\xa4\x6e\x55\xd9\x31\x05\xb9\x26\xcb\x30\x4e\xf1\x48\xf4\x53\x5a\x4f\x78\xee\x34\x0c\x15\x4e\x85\x0e\xad\x31\x9e\x45\xba\x8a\x9b\xbc\x17\x4b\x69\x45\x19\x4d\x54\x63\x1a\x88\x2e\x76\xfb\x6e\x0a\x18\x6d\xe2\x3c\x32\x14\xad\x97\x71\x9c\xf5\xcf\xac\xd4\x39\x82\x76\x2d\xb3\xd4\x76\x6c\x2f\x67\x55\xad\x38\xc4\xca\xc0\xb8\xbc\x62\x5d\x71\x64\x17\x8b\x3e\x0a\x85\x82\x29\xae\x96\xe5\x25\xd9\x78\xb6\x83\x0a\x39\xf7\x3f\x74\xbf\xf4\x0b\xe8\xd2\xba\x18\x94\xd4\x86\x02\x6f\xba\xb9\xe9\xf5\x22\x7a\x46\xd8\x74\x29\x22\x41\xb7\xba\xee\xe0\x67\x79\xe8\x6f\x41\x97\xbb\xfc\xa8\xb3\x10\x5e\xa3\x33\x60\xd4\xa1\x99\x66\x3f\xeb\x15\x4d\xcd\x5c\xac\xb0\xc7\x4c\x31\x8d\x46\x03\xbc\x29\x2a\x48\xf1\xee\x47\xc2\x72\x99\x94\x52\x95\xd8\x5d\x28\x6c\x9a\x18\xdc\xc1\x7d\x64\x81\x63\xd0\xf4\xc3\x73\x55\x3b\x15\x2b\xa0\x69\xba\x78\xdb\x49\x35\x1a\x13\xff\x0c\xee\x1a\x45\xd0\x6a\x5e\x2c\x4d\x37\x92\xe5\xdb\x44\x7c\x35\x1e\xda\xb0\x99\xa6\xaf\xa8\xc5\x81\x4d\x36\x8d\x96\x77\x59\x4f\x32\x91\x2a\x7f\xec\x1c\x82\xe8\x4f\x64\x77\xf7\x92\x66\x4c\xd3\x44\xfa\x40\xc3\xda\x3b\xa8\xd1\x30\xed\xac\x3a\x70\x68\x1d\x04\x8e\xb4\x63\x9d\x0b\x4a\x4d\x6f\x58\xba\x01\x57\xf3\x98\xfe\xa8\x35\xd9\x4c\xcb\xe8\xf3\x8f\x47\x72\xa6\xac\xf3\x48\xd7\x38\xc3\xf6\x06\xe8\x3a\x5a\x35\x27\xb1\x00\x5a\xfa\x23\xb7\x4e\xdf\x6c\x62\xa6\x0b\x42\x28\x72\x80\xff\x21\xec\x02\x7f\x52\x6a\x00\x98\xaa\x4c\xdc\xb5\x72\x3a\x50\xf5\x8c\x1d\x83\xb8\x23\xeb\xe8\x46\x5f\x03\xa4\x75\xc1\x8c\xa8\xc0\x2c\xb9\x42\x90\xbf\x26\x21\x84\x95\x05\xfc\xa0\x60\x85\x2a\x61\x1a\x46\x74\x2a\x56\x3f\x4e\x11\xe6\x85\x8f\xee\x51\xf9\x13\xce\x08\xcf\x9d\x14\xe0\x5b\x
7c\x0b\x2d\x90\x74\x28\x51\x1c\xdd\x69\x1a\x56\x46\xb4\x28\x99\xb3\x20\x62\x5c\x0a\x10\x16\xf2\x46\xac\x34\x77\x8d\x1a\xbf\x1c\x19\xb5\xe9\x03\xde\x8d\x9e\xec\xbf\x57\x46\x46\x14\xb6\x6e\xc1\x1d\x83\xde\xa5\x90\x3c\xda\x2b\xe6\x79\x1b\x79\x24\x87\x84\x4b\x1e\xc8\xa8\x76\x69\x14\xf7\x50\x2a\x31\x42\x00\x6a\x36\xe5\x99\x6a\x0f\x2a\x5f\x68\x2f\xc1\xc3\xe1\x8e\x0e\x47\x9a\xc3\x70\x88\x0d\xfb\x60\xa7\xbd\x35\xd2\x64\x25\xa4\x71\x96\x6e\x45\xde\xa8\x09\x3e\x67\x94\xf7\x3a\x2c\x08\xcc\x9d\x00\x37\xc9\x63\xd2\x40\x70\x1a\x5e\x2c\xf0\x05\xef\xa2\xd8\xde\x2c\x2b\xe9\x72\x1f\x64\x89\xea\xf5\xfa\x64\x71\xfd\xf3\xd2\xd4\x4f\x5d\xaf\x51\xcc\x1c\xbd\xab\x7c\x2f\x27\xda\xfa\xa6\xbd\xaf\x63\x75\x8a\x38\x6d\xf1\xf0\x84\xf6\x4f\x26\x9f\xc4\x53\x87\xca\x17\xc9\x22\x86\x8c\xba\x95\x2a\xac\xaa\x1a\x8d\x60\x96\xd4\x75\xe7\x2c\x8a\x5b\xf9\xbb\x45\x47\xfd\xf0\x31\x6f\xb5\x51\x49\x8d\x2e\x6a\x24\x3b\xb5\x1e\x65\x30\xed\xa6\xab\x19\xe0\xf5\x6f\xe5\x3e\x5e\x21\xd4\xe7\xd1\x63\x19\x5f\xa1\xf0\xb0\xea\x19\x7e\x3a\xcd\x8e\x70\x38\x9b\x51\x11\xe3\x24\xb9\xa5\x4c\xcc\x6b\x73\xb0\x60\x3a\xb0\x9a\x92\xb1\x5c\x01\xc4\x1c\x31\xb4\x14\x3f\xbf\x7e\xc8\x86\xcf\xcd\xca\xb2\xe7\xd7\xca\x6f\x78\xb4\xfe\x35\x6d\x32\x11\x5b\xe9\xc5\x07\x2a\xaa\x3d\xd0\x53\x21\xab\x8c\x7c\x09\x69\x7d\xa4\xb7\xab\x1f\x04\x35\x57\x7c\x06\x49\xfc\xf3\x05\xc6\x03\x97\xd4\x3a\xe4\x2d\x63\x8a\x8d\xd6\x38\xec\x7b\x56\x79\x30\x45\x39\xe5\x6f\xbf\x14\xc9\x5e\x99\xb1\xab\x06\xf6\xc5\xa5\x32\x4c\xe1\xdd\x4b\x37\x37\xab\xd5\x5d\x9c\x0c\xbe\x11\xc5\xa4\x3f\x24\xeb\x72\x0f\xb9\xc0\xba\xd2\x07\x13\x14\xbe\xb2\xdd\xe3\xa4\x9a\x84\x54\x31\x94\x5e\xfc\x91\x90\x69\x08\x56\xad\xa2\xcf\xd9\xf9\xc9\x15\xac\xc2\xc7\x2a\x0c\x7c\x66\x8f\x89\x32\x38\xbf\xfa\x0c\x66\x08\x05\xf8\x67\x9c\xdb\x49\x11\x0c\xaf\x4e\x82\x90\xf5\xa7\xaa\xdc\x8d\x4e\xc2\x2a\x44\x3e\x98\xbf\x65\x94\x7f\x4f\x2a\x84\xe4\xf5\x48\xbc\xc0\xde\xce\x5d\xbf\x63\x17\xb7\x25\xbd\x89\x7b\xb3\xb6\x5e\x7e\xb4\x97\x9d\x5f\x53\xa8\x1a\x3c\x79\xae\x66\xd1\xc4\x4f\xd5\xe1\xc0\x74\xc6\x6e\x17\xa6\x
39\xec\xcc\x48\xde\x65\x79\xb0\x2f\x43\x72\x60\x10\x6b\x1c\x37\x01\xe0\x00\x66\x4d\x38\x72\xae\x1c\xc7\xc4\x17\xd1\x99\x41\xb6\x51\xdd\x14\xc1\x9b\x6f\x51\x1b\xfd\x9b\xc7\x2c\xea\xbb\x3b\xf0\x76\x17\xfe\x6a\xb1\xa7\xc3\x63\xfb\xde\xb0\x78\x77\x07\xef\x38\x15\x1d\x58\x52\xb3\x55\xcb\x75\x14\x29\xde\xa2\x69\x14\xc7\x35\xe7\xae\xe4\x78\xbd\xf2\xe7\x9e\xf0\x86\x28\x82\xda\xb9\x35\x2f\xc0\x8b\xd7\x16\xd5\x92\x00\x0f\xd3\x7f\x50\x9e\xa3\x38\x6f\x63\x30\x7d\x10\x26\x12\x69\x22\x19\x46\x91\x4d\xc0\x5e\x35\xf2\xad\xac\x42\x9a\xef\x4a\x63\x9c\xb6\xf1\x21\x54\xb4\x7b\x31\x2d\x97\x58\x0e\xc1\xed\xd4\xc0\x05\x8c\xe9\x3e\x00\xad\x46\x3b\x4b\xc8\x48\xdc\x08\x77\xb9\x3b\x58\xe7\xd8\x75\x3a\x08\x8f\xc9\x6e\x33\xf7\x9a\xb9\xa8\xe4\x83\x4f\xc3\xdc\xcd\xb1\xe7\x01\x8e\x90\xb6\x1d\x1a\x40\xc3\x34\x61\xf4\xbe\x39\x9d\xaf\x6a\x91\x93\x9e\x8c\x83\x7d\x96\x31\x35\xd8\x4b\x69\x5a\x97\x42\x43\x69\x9c\x04\x9e\xff\x85\x1e\x13\xd2\x1a\x67\xfd\x5f\xe9\x6d\x27\x38\x4e\x34\x1f\x4b\xcb\xec\xcf\x92\xe0\x91\xff\x7c\xfd\x67\xad\xfa\xf7\x62\x9c\x15\x49\x63\x47\xca\x6e\x38\x5e\xd7\x47\x4e\x69\xf7\x74\x50\xde\x8c\xc2\x24\x6f\x1e\x8a\xc3\x94\xd8\xa0\x56\xd7\xa7\xd9\x48\x07\xfe\x05\x5f\xa4\xd2\x66\x2f\x0f\x48\x1c\x0e\x22\x34\x66\xb0\xc3\xa2\x88\x29\x19\x36\x02\x12\xe6\x64\xa2\xd3\x17\x10\xa4\x0a\x97\x1e\xe4\x6a\x68\x6a\xec\xd5\x5f\x1b\xdf\x25\xf8\x7b\xde\xa4\xe1\xea\x43\x7e\xd6\x73\x39\x64\x9f\x10\xb0\xba\x9c\x2c\xe0\x79\xb1\xef\xf8\x92\xf2\x94\x3c\x4e\x41\x6e\x7f\x8f\xe0\x88\xbe\x56\xa4\x0f\x48\x25\x01\x1b\x61\x39\x6a\x50\xb0\x9b\x04\xad\xce\x41\x3e\xe4\x83\xe7\x47\x33\x53\x06\x01\x64\xb8\xf2\xe9\x19\x10\x06\x82\x25\xa7\xc8\x23\x17\xed\xae\xcc\x69\xd7\x49\xe3\x3d\x0f\x79\xf2\x55\x9c\x7b\xdf\x94\xdf\xea\x0b\x16\x56\x9b\x6f\xd8\x14\x96\x6d\x14\x27\xcb\x18\x75\x20\x06\x47\x59\xc7\x3f\x8d\x26\xb5\x89\xbc\xab\x0a\xe2\x98\xb6\xdc\x6c\xe9\x8a\x26\x41\xfb\xce\xc3\x49\x8c\xed\xa5\xfe\x70\x3e\xac\xc1\x19\x3d\x2c\x56\x7d\xdb\x05\xd3\x01\x4e\xaf\x53\xd8\xac\xb7\xc6\xc3\x8c\x58\x85\xf4\x60\x35\xe5\x04\x5e\x82\xe6\x80\x
a2\x3b\x10\x2a\x3d\xa8\xbc\x02\x78\x90\x04\x52\x13\xda\x6e\xc7\xc8\x68\xb4\x2a\xd0\xa6\x64\xd0\xf4\x71\xb9\x14\x6c\x95\x12\x35\x79\x67\x92\xb2\xff\x7c\xbd\xc3\x71\xb8\x79\xcb\xa8\x14\x56\xa1\x97\x97\x4e\x96\xfe\x49\xbb\x33\x84\xb3\x9e\xea\x34\x53\xfa\x2e\x06\x02\x9d\xff\x51\x86\x8d\x46\xcf\xa4\x69\x0f\xaf\x51\x1a\x57\xcc\x28\xb5\xc7\x39\x9b\xf0\xd3\x97\x80\x83\x5e\xf9\x37\x02\x12\xdc\x33\x3c\x91\xfa\x92\xb1\x06\xe6\x3a\x53\x6f\x05\x26\x68\x56\x0d\x9e\x25\x26\x81\xaf\x03\xe1\x05\x35\x7a\x70\xc1\xd8\x3b\xda\x29\x23\x64\x0e\xb4\xf4\x80\xfd\xac\x99\x9b\x85\x07\xbf\x36\x62\xf6\xc7\x32\x60\x6f\x0c\x90\xe8\x73\xbd\x19\x9a\x86\xbf\xe9\xfe\x01\xbe\x59\x63\xdc\xbc\xb4\x9f\xd1\x64\x0d\x0c\xd7\x78\xc5\xf1\x2d\x12\x23\xab\xe6\xa7\x57\xbb\x03\xf1\x78\xdf\x91\x9e\xd2\x54\x33\xca\x10\xcf\x4d\x89\xa4\x08\x7e\xf9\x8a\x64\xa6\x1c\x70\x1a\x4e\xad\xd5\xef\x99\x70\x58\x19\x41\xfb\x79\xa6\x21\xf4\x2e\xdc\x21\x56\x61\x4c\xdc\x21\xb6\x36\x4f\x3c\x6b\xaf\xbd\x53\xdf\x3f\xbb\xee\x5c\x07\x9c\xa4\x0f\xe7\xa0\x62\x3e\x8d\x74\x03\x66\xa0\x74\xd7\xa2\x48\xb1\x33\x25\x0a\x62\xa7\x2a\x0b\x8f\xca\x79\x58\xff\x35\x51\x63\x0d\xfc\x3c\xc8\x85\x98\x77\xdf\x5e\x49\x42\x7d\x80\xad\x30\xa0\x9c\xb9\x19\x44\x05\x56\x55\xc6\x3a\xad\x35\xc2\x04\x38\x0c\xaf\x67\x9e\x42\x58\x86\xa4\xf3\x15\x5e\x68\xc1\x75\x91\xcd\x64\xdd\x27\xad\xc9\x41\xff\xe2\x55\x86\x2d\xe8\x1f\xb7\x0c\xd4\xa2\x87\x1d\x3a\x2f\x78\xf8\x4c\x42\x95\xed\x77\xff\x9c\xc3\xfe\xca\x12\x10\x34\x8b\xba\x2f\x4f\xe2\x7e\x4a\xcf\x36\xb3\xb9\x93\x1a\x44\x04\x14\xc9\x1c\x3d\xd5\x45\xb4\xe6\xaa\x2d\xf3\xfb\xb3\x57\xb8\x2d\xa9\x15\x48\x0a\xf2\xc3\xc8\xc6\xd5\x5b\x53\xf4\x7f\xf7\xd6\xb0\x52\xc1\xf0\x8c\xa1\xc7\x62\xba\x48\x87\x29\xec\x5c\x05\x9c\x45\x6f\xe2\x21\xe9\x11\xf1\xe1\x4b\x0e\x03\x84\xe2\x1d\xbb\xad\x1a\x82\xea\x8c\x93\x78\x2a\x8b\x49\x7a\xeb\x1b\x58\x02\xfa\xb8\xeb\x93\x25\xe6\xf2\x1d\xc7\xa7\x03\x78\x76\x72\x14\x68\xa5\xfa\xc7\x9b\x07\x7c\xb6\xe6\xd8\x63\x17\x08\xc6\xb9\xfe\x77\x8a\xe5\x49\x98\xa6\x55\x99\xe0\x19\x21\xb6\x5e\x7d\x5f\x4b\x89\x91\xff\x
65\xb2\x1b\xa3\x07\x94\xa8\x8d\x07\x23\x65\x51\x60\x2b\xf6\xaa\x60\x32\x13\xf2\x67\x71\xd2\x3d\xd3\x4a\x71\x1e\x51\x36\x35\x63\xca\xa9\x28\x40\x81\x9b\x40\x74\x84\xb2\x5c\x28\x45\x99\x88\x2c\x0f\x88\x8c\x6f\x56\x03\x31\x7b\xdd\x07\x7d\xba\x30\x34\x8e\xb4\x87\x50\x5b\xf3\x68\x2b\x32\x38\x21\x4e\x30\x8f\xfc\xc1\x66\x7f\x2b\x58\xf0\x3e\x5b\xc8\x52\x61\x08\xe5\x09\x08\xd7\xd3\xa7\x54\x00\x0b\x3a\xc0\xe4\xeb\x76\x25\xf8\xec\x44\xb5\x84\x71\x8d\xa8\x48\x95\xf7\xaa\xbc\x81\x9c\xbc\x21\x03\x14\x99\x58\xa6\xaa\x0e\x28\xe6\x41\x10\x95\xfd\x50\xd2\xf3\xdc\x69\x75\x1c\xb5\x19\x19\x82\x4c\xcc\xe9\x38\x9a\xbf\x60\x03\xf0\x66\xdc\x47\xf7\x32\xc5\x2c\x99\x63\x92\x4d\xa8\x0b\xdc\x26\xfd\x07\xcb\xdf\x54\xfe\x9e\xbe\x07\x53\x40\x32\x88\x2b\xa4\x27\xdd\x78\x79\x9f\x25\x4a\xe5\x75\x22\x20\xe0\x24\xbd\x3d\x4f\xc9\x47\x19\x08\x49\xf1\x27\x46\x86\x9e\x9a\x70\x0e\x74\xc6\xca\xba\xfa\x27\xff\xa7\xc1\xab\x4c\x4f\xb5\x9b\x9b\xc8\xcc\x21\x4a\x90\x90\x94\xaf\x8f\xc6\x8d\x98\x29\xc2\x2f\x76\xd7\xe8\x4f\xed\x1d\x6f\x3f\xe0\xab\xa7\xa1\xc5\x72\x08\x66\x3c\xf0\x69\x49\xe5\xdb\xb1\x2f\xc4\x01\xda\x6f\xce\x1b\x99\x20\x9d\xc7\x71\x7f\x69\xb0\x2d\xe5\xc0\xa9\xe2\x68\xcd\xd3\x54\xea\x6f\x53\xcf\x27\xf4\xdc\xec\x92\x28\x26\xb5\xaa\xd7\xf7\xac\x3a\x00\xe2\x35\xb2\x2d\x85\xea\x16\x78\x09\xf8\x2f\xf8\xc5\xcb\x75\x43\xfa\xd8\x49\xd5\x6d\x55\x40\x77\xfd\x93\xcd\xb6\x19\x7b\x7c\xad\x2a\x44\xf4\x25\x9a\xb8\x03\xda\x57\x06\x7e\xd6\x8b\xc7\xe7\xdf\x5b\x61\x5a\xff\x9e\x60\xc1\xb0\xc1\x7b\xaa\xe0\x72\xc4\xfa\x2c\xda\xa0\x50\x4a\x22\x33\xb8\x8a\x93\x4c\xc6\x77\xdc\xb1\x62\x62\xc9\x15\x2d\x6e\x2c\xb8\x41\xcc\x33\xcc\x54\x6e\xa3\xa6\x6f\x69\x2f\xb9\xc8\x0e\xf2\xf9\xe3\xda\xb4\x3b\x56\xa2\x1a\x91\xe8\xf1\x16\x5b\xdb\x30\x2b\x82\x3f\x1e\xc7\x4f\x3a\xa5\x63\x75\xd7\x3f\x47\x04\x4d\x0f\x5d\xb4\x55\x08\x40\x33\x6c\xfe\x1c\x23\x88\x3e\x64\xe7\x75\x4b\xdd\x56\x5a\x33\xcc\x6f\x0e\x73\x2a\xa3\x07\xed\x09\x61\xbc\x05\x5a\xff\xc6\xe4\x09\x62\x02\x38\xb4\x59\x8d\x2b\x58\xd4\x62\x3c\xb7\xe4\xfa\x8c\x17\x59\x5e\x0a\x9f\x69\x21\xc9\x
73\xb5\x0d\x5c\xf7\x3e\x3f\x88\xf8\xca\x83\xf2\x5b\x84\x0a\x38\xcf\x6d\x57\x37\x53\x2c\x25\x56\x24\x89\x19\xb7\x09\xc0\x2e\x7f\x85\x8a\x3d\x37\xe3\x1f\xf3\x3c\xf8\x44\x72\x0a\x17\xb0\xf7\x0f\xd0\xdb\x7c\x7f\x1a\x51\x6a\xf2\x11\xab\x95\xe7\xe1\xb7\xad\xf0\x38\x83\x38\xb4\x60\x6a\x70\x91\x14\xbb\x73\xef\xb0\xe7\xc9\x60\x09\xc1\xb8\xa9\xee\x3c\xfd\x3d\x1b\xb4\xc7\x00\x05\xbc\x51\xc7\x38\x06\x7a\xdd\x43\x63\xf4\xaa\x91\x7c\x0a\x91\xf8\xd8\x99\xb7\xa2\xae\x5a\x33\x73\x88\x00\x5c\x8f\x71\xc2\xe3\x92\x0f\xc2\x51\xa2\x51\x1d\x60\x0f\x03\x4f\x74\x2d\x9b\x63\xb2\xd5\x6c\xd3\x11\x3f\xf7\x56\x5d\xa2\x3b\x12\xb8\x13\xad\x59\x07\x19\x61\x51\x68\xc1\x67\x92\x3b\xdb\xd3\x27\x4c\x2d\x4f\xdc\x10\x30\x09\x9d\x25\x57\x7b\x59\xbc\x6a\x14\x26\x45\x48\x59\x56\xc4\x95\xa6\x58\x8b\x83\x23\xce\x33\xab\x23\x51\x3d\x29\xdc\xa2\x6e\x80\x14\x8f\xfb\x6e\xed\x5a\xcd\xfe\x03\x73\xc7\xe5\x91\xbb\x47\xda\xf4\xd4\x61\x2a\x34\xbf\xa5\x19\x65\x76\xab\xc2\x1f\x37\x94\x84\xee\xbe\x6a\xe9\x3d\x2a\xda\x9b\xe0\xb1\x85\xc2\xc1\x18\x5d\xbe\x81\x8c\x76\xdd\x2b\x4d\x55\x58\x5f\x42\x3c\x33\x56\xdc\x41\xb4\xfb\x5b\xaf\x5d\x92\xf5\x4a\x25\x70\xfd\xbb\xd0\x2a\xd4\x9b\x69\xaa\xc5\x3b\xf1\x5a\xe7\xf2\x97\xa3\xe6\xc6\x87\x34\x09\x80\xc6\x34\xe3\x0f\x65\x9b\xf5\xe8\xe8\xe2\x67\xa8\x7b\x48\x57\x96\x23\x3f\x8a\xd6\x62\xe2\x76\xc9\xa5\x1a\x03\x5f\x27\x32\xf7\xba\x2f\xe6\x64\xef\x0a\xc9\xbf\x70\x91\x68\x58\xa9\x61\xac\xd9\x13\xf2\xc6\x97\x57\x3a\x2c\x0c\x21\x2b\x79\xff\xd6\xfa\x76\x5d\x5b\xe3\xc6\x9d\x65\x77\x02\xcb\x98\xaf\xfb\x54\x69\x36\x7e\xf8\x3c\x6c\xd8\xbb\x42\xb4\xa8\x24\x20\x68\xe2\x78\x04\x6a\x5f\x74\x3a\xd7\xef\x77\xa7\xac\x55\x4e\x2e\x59\x93\x46\x4d\xc8\x00\x14\xb8\xb9\x98\x37\xe7\x32\xe6\x7d\x48\x41\xbd\xd5\x8b\x57\x9a\xc5\x3f\xc6\x80\xdb\x25\x9f\x02\x3e\x51\x2c\xb5\xba\x2b\x98\x76\x1f\x7a\xd2\x39\xda\x0c\xdd\x76\x68\x2b\x25\xe2\x7f\x28\x2f\x75\x61\x7c\xe2\x7d\x76\x22\x6c\x28\x5d\xc3\xa7\xde\x74\xf4\x00\x37\xe1\x5a\xde\xe1\x4d\x44\x51\xb6\x27\x0d\x3c\x2e\x04\x3d\x41\x11\x3e\x03\x68\x39\x86\x48\xfc\x4d\x50\x68\x
53\x1a\xc4\xc5\x22\x73\x8f\x85\x72\xfa\xfd\x66\x31\x57\xfd\x65\xf2\x26\x00\xe4\xc1\x91\xb9\x85\xd2\x60\xbc\x4a\xd3\xed\x7d\x25\x32\xc0\xa2\xeb\x78\x14\x04\xee\x8f\xa4\x81\x9f\x22\x55\xfe\x55\xe9\x26\x25\x70\xdb\xcd\x07\xa2\x8c\x57\x0f\xb7\xb5\x10\x91\xdd\xf1\x92\x98\xcd\x9b\xac\x1f\x1f\xfa\xca\x8c\x02\xca\x46\x11\xd2\xe9\x41\x2c\x87\x81\x89\x88\x20\x2d\xdd\xf9\x3a\x8a\xf3\xa7\xb7\xb4\xc1\x49\x80\x62\xba\x1c\xcb\xd1\x9a\x96\xda\x98\x69\xd9\xf8\xc6\xb5\xa5\x1e\xa3\xec\x2b\x0a\xed\x9a\xe8\x9d\x57\xeb\xe4\x6c\x2c\xad\xe2\xed\x5e\x2d\x3e\x65\x3d\x74\x5c\x63\x21\x30\xaf\x84\xea\xba\x60\xa5\x51\xbf\xd8\x9c\x72\x99\x22\xf2\x19\x19\x38\xbd\x1b\x3f\xe4\xf8\x69\x0b\xf1\xbd\x1b\xcd\x4c\xc1\xa3\x2e\x07\x8b\xf7\x7b\x1d\xfb\x8e\x8f\xba\x68\x43\xaa\xc4\xff\xf6\xe7\x36\x8f\xd2\xa1\x8c\xaf\x3c\xc9\x15\x1c\x40\x98\x7f\xbc\x69\xf8\xe9\x9d\xb3\x30\xb4\xed\x97\xa1\x9a\x2b\xec\xf6\xb1\xef\xab\x76\x15\xd4\x79\x73\x3c\xe8\xf6\xff\x09\x4e\xd6\xa7\x95\xf0\xa8\x9e\xa9\xe9\x8f\x21\x9d\x81\x6d\xdf\x9e\x0e\xa6\xed\x87\x74\xa7\xd1\x9a\x24\x44\x23\xc7\x0a\x42\x3e\xdb\x4a\x33\x17\x2c\xc1\xa0\x7b\x0d\x4c\xf1\xd0\xda\x64\x59\xb4\xa9\x88\xb1\xa4\x28\xa2\xb7\xf5\xe8\xb6\x08\xf4\x4f\x83\xef\xfd\x12\xc0\xdf\x31\x78\xb9\x33\xf5\xed\x62\x39\x1f\x1f\x57\xc3\xf3\x34\x6d\x42\x36\x18\x2c\x0b\x6a\x17\xae\xa7\x7c\xd6\x3f\xd3\x42\xd1\x65\xc9\x08\x95\xea\xba\x23\x47\x44\x09\x72\x4c\x60\x89\x0a\xae\x33\x27\x1c\x64\x14\xb7\x35\xc5\x94\x84\xac\x33\x66\xfe\xce\x22\x78\x1b\xab\xbe\xf7\x27\x00\x24\xfd\x6d\x60\x6d\x79\xc2\xdc\x0d\x33\x25\x77\x58\x37\xc1\x7d\xa5\xca\x1f\xac\xe9\x31\x79\xcb\x1a\x89\xd2\xda\xeb\xb5\xa2\xfd\x09\x80\x4f\xdf\xc7\x12\x7e\x70\x3d\x38\x89\x62\x5a\xf8\x05\x2e\xda\xf3\xa7\xef\xa9\x4c\xf8\x91\x5e\xc6\x11\x74\xe6\xff\x55\x8d\x31\x6e\xcb\x2e\xa8\xdf\x1b\x99\xe5\xe1\x96\x0c\xca\xef\xc6\x24\x8e\x06\xcb\x1a\xe6\x73\x64\xef\x78\xf7\x8a\xff\xd9\xae\xd2\xeb\x38\x2d\xcd\xcb\x98\xba\x55\xf0\xa7\x8c\xcc\xcb\xe6\x05\x3e\xde\xe0\xee\x5d\xb9\xe7\x31\x1e\x69\x77\x84\xca\xc2\x18\x64\x2d\x3e\xa3\xa6\x80\xe5\xc8\x6a\x
46\x42\x23\x1b\x6d\xc3\x96\x72\xda\x14\x62\x0b\xd3\x4f\x5c\x1b\x2d\x39\x4d\xeb\x5d\xb5\x30\xe8\x91\xbc\x0f\x1a\xad\xe7\x9d\xd9\x3e\xe8\xe5\xc7\xe6\xf2\x0c\x8e\xe7\x46\x57\xf9\xb7\xa9\xdf\x41\x79\xee\x75\x89\xed\x34\x2e\xdd\xff\x89\xe1\xa3\xbc\x89\x2b\xa9\xa6\x44\x1c\x42\x5f\x6b\xbd\xd3\x84\x13\x71\xd1\x7e\xb1\xdd\x62\xac\x08\xf3\x47\x05\xfd\x45\x57\x15\xfb\x8e\x65\x16\x9f\xe5\x7c\x2e\x75\xf7\x70\x64\x7a\x18\x94\x7d\xe9\x4a\xc0\x6c\x2b\x92\x02\xbe\xa5\xed\x99\x04\x07\x4c\x3d\x04\xf0\x9e\x56\xa7\xfa\x62\x52\x09\xf8\x08\xf6\x13\x31\x2e\x54\x2a\x04\x1b\x91\x13\xb9\xc5\x0b\x1f\x18\xb4\x38\xaf\xd9\x72\x17\xa4\xea\x0e\xa9\x79\xdf\xf1\x68\xa3\xe8\xda\xac\x50\x4a\xda\xd5\xbe\x21\xe8\x8d\x9b\xb8\x88\x4a\x5f\x79\x0b\x3f\x1c\x95\xbe\x95\xf3\xdb\x37\x66\x91\xf8\xc3\xae\x85\xbb\x92\x3b\x53\x81\xc6\xd1\x0a\x07\x05\x43\x06\xd0\x36\xde\x0b\xef\x30\x24\x9d\xd4\xff\x4c\x9a\x61\x7d\xd2\xaf\x32\xe6\x73\x23\xf5\x96\xe2\x9d\xb9\x5c\x3e\xff\xb3\x6c\xe6\x24\x03\x42\xfb\x51\xc1\xb6\xab\x1d\x1f\x99\x96\x29\x9f\xb4\x0f\xa0\xfb\xc9\x60\xca\x25\x0a\xa5\xc9\x36\x9e\xce\xf1\xe6\x52\x87\x87\x74\x23\x9f\x99\x1b\x31\x52\xc4\x1d\xd9\x6f\xa2\xa7\x27\x3d\xe3\x42\xe8\x0a\x5e\x53\x8a\xc6\x1d\x5e\x10\xa3\x54\x6d\x55\x85\x73\x57\xad\xf7\x7f\x80\xed\x13\x66\xce\xe6\x32\x38\x7e\x82\xc8\x4b\xf5\x12\xbc\x69\x72\x16\x73\x59\x08\x02\xae\xd6\x71\xb8\xd0\xa8\x46\x2a\x31\xcf\x57\x08\xff\xdd\xa1\x4d\x82\xa1\x11\x01\x6b\x6a\x10\xbb\x24\x6f\x50\x23\xd1\xdc\xfa\x2c\x24\x7c\xdf\x26\xf7\x8b\x86\x77\x55\x53\x82\xb1\x5a\x04\x9f\x25\xb9\xd6\x04\x42\x68\x73\x14\xae\xb2\x91\x78\x1e\xec\x92\x61\x4f\x86\xb8\x94\x94\x90\x8a\xae\x7a\x42\xf5\xf2\xc1\x18\x9d\x2f\xbd\x9e\xef\x60\x9e\x2d\x3d\x8b\xc9\x22\x4a\x58\xc2\xc6\x20\x57\x3e\xc3\xd6\x8a\x6f\xf2\x5c\x76\x87\xc6\x7c\x48\xb6\xfc\xb9\x01\x6d\xdf\x9d\x92\x57\x5b\xfc\x4d\xcd\x72\x8e\xd0\x69\xdb\xd8\xcd\x92\x8d\xff\x0a\xa6\xbc\x1c\xb5\xce\x2e\x1e\x52\x8d\x87\xe9\x01\x2b\xfa\xfc\x5a\xe9\x25\x2a\xa5\x13\xd5\x9c\x7e\x8a\xb8\x93\x77\xdf\xdd\x99\x66\x2f\x24\xdd\xa0\x82\x76\x05\xb2\x32\x7f\x
21\x85\xf6\x7e\x24\x67\x0f\x91\x66\x93\x73\x07\xd6\x4b\x50\x32\x67\xc4\x06\xf4\x13\xa8\x8c\xf6\x11\xba\x1b\x2b\x62\xc7\xad\x6b\xf5\x25\x6f\x6c\xb5\xb6\x33\xe8\x39\xb2\xdf\x07\x57\xfe\xad\xfa\x57\xbc\xda\xc9\x0b\xdf\x6a\x31\x4b\x77\x9b\xd9\x0c\x87\xf3\x33\x2e\x57\x01\x9f\x18\xf1\x60\x62\x2a\xfc\x84\xbc\x83\x05\x3c\x4c\x8a\xbc\x71\x7f\x29\x22\xc6\x01\xee\xa7\x3f\xf1\x0b\xf1\xe8\x60\x30\x6d\x52\x24\x0c\xe3\xdd\x57\xc6\x29\x60\xb8\x14\x0b\xc3\x69\x2d\xad\x17\x2d\x05\xec\xae\xaa\x31\xa3\xbc\x56\x90\x2d\xab\xd1\xe7\xf0\x0a\xd5\x22\x2e\xbd\x88\x65\xfc\xfa\x02\x25\x7c\x62\x3f\x64\x2a\x30\x16\x7c\xb0\xac\xf6\xd1\xed\x67\x2d\xb7\x62\x58\x18\xa5\xc6\xeb\x04\x4a\xb5\x41\xb5\x3a\x08\x15\xc7\xd8\xdd\xd0\xb2\x13\x0d\xad\x43\xf8\xf5\x3a\xcb\xaa\x93\xaa\x12\xdc\xed\x6e\x4e\x7b\x6a\xc8\xe4\xcc\x32\x62\x56\x5e\x23\x89\xcf\xc4\x21\x11\x0a\xa7\x04\xb8\x73\xbe\x5c\x62\x9d\xee\x72\x0c\xd7\xf1\xaf\x8e\x9c\xca\x17\xd8\xb1\xe5\x75\x42\x66\xe2\xee\x14\x91\x1a\x3b\x26\x62\x67\x27\x1d\x54\x38\x72\xb4\x98\x3c\xb0\x1e\xa6\xd7\x01\xbf\x07\x5a\xa4\xda\x81\x9f\x08\x72\x08\xd0\x20\xc3\x56\x03\x55\x02\x94\x68\x75\x93\xa7\x11\xd3\x6c\xf4\xef\xfc\xd5\x54\x42\x68\xf2\x17\xcf\x7f\x05\x6c\xeb\x4c\xbe\x28\x50\x29\x76\xfc\xe0\x05\x2a\xa5\x1d\x8b\xfa\x9b\x93\x50\x5d\x2c\x29\xba\xd0\x18\x2f\x7f\x38\x1f\x0f\x88\x50\x85\x1d\x7e\x22\x45\x36\x42\x8e\xff\x3d\x7f\xf8\xee\x5e\x25\x59\x8b\x0a\x0e\xd0\x7b\xd1\x72\xbf\x41\x2d\xc1\xce\x82\xb2\xbc\xc6\x15\x32\xb0\x20\xcc\x3f\xea\x5a\x79\x46\xd3\x7d\x4b\xc0\x8f\x88\x33\x26\xb0\xc7\x3f\x94\x37\x92\xad\xb4\xe3\xc8\x6f\xe1\x4d\x49\x2a\xbe\xde\xa4\x38\xa9\x3f\x89\x80\x54\x77\x3a\x21\x26\xee\x28\x0b\xee\x9c\x4f\x9f\x20\x58\xf6\x89\x79\xd7\xdb\x24\xaa\x8b\x54\x97\x14\x12\xf8\x5d\x8f\x41\xfb\x3f\x1f\x6a\x68\xc7\xfd\xdc\xdc\xc0\x69\x53\x50\xe8\x8b\x36\xbc\xa2\x9b\xd3\xb2\xb4\x3f\xe3\xcc\x1f\x96\xe1\xd3\xd1\xd9\xcf\xb3\x08\x16\x11\x04\x54\xeb\x97\x01\x05\xa3\x27\xd7\xbe\x13\x3d\x7e\xa4\xda\x3d\xf0\xac\x75\xdf\xd3\xc6\x44\x38\x59\x3f\x25\x74\x62\x2f\xe3\x1f\x9e\xb3\x4d\x67\x49\x5f\x
29\x8d\x0c\x94\x4f\xf9\x58\xed\xe1\x8d\x1a\x98\x8f\x86\xb5\x76\x0c\xfa\x0e\xd1\xd2\x97\x75\x56\xf4\x26\x11\xcb\xb3\x0b\xd7\x2a\x1c\x0f\x7e\xf3\xed\xe5\xf2\x86\xcd\xdd\x8d\x6a\xc8\x90\xc2\x63\x11\xbf\x2b\xd1\x26\x13\xdd\xc0\xc8\x04\x0c\x32\x16\xdd\xc6\x02\xe9\x04\x3e\x47\x29\x2c\x2b\x2d\x43\x3c\xf7\x3c\x98\x43\x54\x50\x39\xb5\x5d\x3b\x83\xff\x8f\xfc\xf1\x5a\xcb\x39\xce\x10\xf7\xef\x8e\xe3\x22\x02\x33\xaf\x50\x38\xf9\x22\x05\x5a\xd5\x57\x59\xe2\x75\xd0\x55\x89\x1f\x90\xa3\xf5\x8a\xf2\xd4\x5e\xf4\x20\x26\x2e\xbe\x1f\xb9\xfb\x03\x80\x6e\x25\x35\x52\x5c\x72\x44\xef\x66\xf9\x71\xb5\x8b\xea\xe7\x02\xe4\xb1\x2a\x6e\xd8\xab\xf1\x35\x43\xb6\x7b\x14\x04\x0e\x3c\xac\x2e\xce\x62\x0c\xf6\x00\x74\x6a\xbc\x6b\x8a\x52\x46\x8d\xbf\xda\x7c\xbb\x94\xe0\x10\x21\x21\xf0\x3b\x26\x12\xc9\xa6\x9c\xf5\x8d\x40\x46\x40\xe7\xf4\x80\xa4\xc7\x4a\x28\x2c\xcb\x7e\x52\xf1\x69\x86\x58\xbe\x0e\x9b\xd3\x3c\xb0\xfd\xd6\x27\xda\x10\x21\x79\x07\xd6\x5f\x00\x47\xc0\x9f\x91\x64\xcc\x91\xbb\x16\x70\xcd\xdd\x50\x06\xa9\x4c\x69\x70\xb7\x5d\x71\x26\x30\x69\x8b\xc7\x1b\x4e\x1d\x57\x70\x56\x98\x35\x51\x37\xc4\x46\xe4\x0f\x2c\x0b\xdc\x6c\x58\xd5\xd6\x3c\x54\x19\x1e\x3c\xde\xa3\xac\xa0\xa9\x8b\x23\xb4\x52\x31\x4e\xa3\x36\x9b\xa7\xd3\xc3\x2f\x8a\xe1\x9c\xa5\x06\x74\x34\xfd\xb3\x9f\x2f\x4c\x12\x82\x0b\x03\xc7\xc7\xdf\xfa\xe4\x46\x35\x6e\x52\x7c\xb1\x61\x06\x05\x14\x33\x13\xa2\xf8\xa3\xc4\x57\x32\x4a\x41\x7c\x7f\x54\x1c\xa4\x9d\x1c\x24\xb5\x7d\xb2\x59\x0f\xcc\xcc\x86\x93\x74\x05\xa5\x5b\x75\xb9\x48\x3c\xda\x62\x43\xca\x05\xbb\xf6\x79\x7a\x45\x4a\x69\xe8\x41\xbb\xa9\x73\x92\x7f\xd1\xd1\x99\x7f\x5b\x69\x8e\x48\x91\x9c\x2a\x92\x6a\x3a\xfd\x2e\x07\xcd\x44\xbe\xda\x0f\x2b\xfb\x62\x85\xcf\xf0\x16\x28\xa6\x37\x12\xa2\x15\x64\x22\xd7\x51\x75\x47\x1f\x25\x11\x7d\x45\xe3\x57\x87\x4e\x76\x63\x86\xe0\xe9\x60\x0d\x55\x7b\x7e\xc3\x09\x6f\x1c\xf0\x95\x30\x6b\x34\x3e\x85\xfe\x53\x22\x15\x3c\x39\x58\xd5\x2e\xc6\x47\xf5\xa3\x35\xa6\xe1\x65\x27\x4c\x21\x75\x0e\xac\xd6\x79\xc6\xc4\x1c\x03\xc2\xf0\x02\xf0\x8b\xc7\xbf\xb2\xdf\x13\x6d\x22\x
a5\x09\x4a\x76\xf6\x72\x15\x9f\xe9\xeb\x56\x1a\x92\x40\xec\x76\x40\xe4\xca\x60\x09\x28\xce\x61\xb7\x19\x6a\x51\x3a\x96\x0a\x6b\x43\xaf\x9c\x25\x1e\x17\x95\x1c\x5e\x04\x00\xa4\x9a\xf0\x8e\xe0\x1f\x01\xab\x21\x3c\x86\xc3\xf6\xc8\x98\x35\x28\x3a\x20\xeb\x24\xf1\x93\xa0\x8c\x9d\xa7\x3d\x71\x6b\x16\x64\x98\x6a\x72\x90\xf6\x9d\xe9\x36\x21\x28\x8f\x8d\x51\x5c\x39\x84\x4b\xd8\xe2\x91\xea\x81\x7a\xc6\x53\x31\x9d\x86\xe9\x1f\x4a\x87\x4a\x33\x10\xf8\xda\xa1\x34\x4d\xc0\x6f\x0e\xe2\x04\x00\x92\xb9\x75\xf7\xaa\xdd\x03\x3f\xb3\x35\xb4\x39\x95\x1a\x08\x4d\xc3\x8a\xf6\x0b\xca\x38\xfd\x15\xfc\x26\x54\x22\x9b\x1b\x95\x74\x3e\x51\xeb\x05\x7b\x31\xba\xe5\x96\x6b\xe3\xb0\x24\xba\x87\x39\x38\x06\xf8\x87\x9c\xfd\x3c\x6f\x89\x37\x0d\xc2\x70\x2c\x52\x62\xf7\x4f\x85\x09\xb3\x1e\x8e\xc8\x71\xb9\xbe\xa5\x8d\x31\x6e\x60\x50\xfb\x03\x90\xc7\xbe\x43\xb1\x50\xf7\x79\xbc\x53\xf1\xb2\xd9\xf9\x31\x38\x8e\x81\xce\xe9\xb5\x9a\x9e\x00\xdc\x0e\xad\x03\x72\x67\x35\xc6\x31\x60\xcd\x92\x31\x84\x8a\xdc\x0e\xba\x55\x55\xf0\x7b\xa7\x9c\xc3\xa3\x86\x8a\xc7\xf6\x96\x38\x99\x7f\xb1\x47\x78\x28\x0a\xf5\x06\x27\x0a\x06\x0a\x35\x1b\xc8\xc2\x8e\x0b\xbb\x67\x9f\x98\x65\x63\x9f\x98\x32\x98\xcc\x94\x3b\x17\x2f\x9b\xbe\xab\x25\xc6\xcc\x86\xc9\x12\xd3\xc6\x49\x70\x6f\xdf\x7a\x93\xcc\x5e\x49\x3e\xa0\xd6\x04\x38\x5f\x7e\x49\xa1\xbe\xeb\xbf\xad\x80\x7a\x9d\x71\xa0\x3c\x4d\xe2\x4e\xd0\x64\xbf\x0e\x8b\x8e\xe0\x94\x83\xbf\x08\x75\xf7\x15\x80\x44\x31\xe9\x47\x36\x75\xb1\xd7\x6f\x63\x44\x8e\xb8\x57\xd5\x60\x3a\x3a\x6a\xde\xbf\x17\xd4\xcf\x2e\x18\x8b\x01\xd3\xc4\xdd\x38\x35\x5a\x16\x0a\x67\x70\xd7\xbf\xea\x3c\x14\x22\xa1\x25\x06\xcf\xc0\x31\x59\x9c\x57\x1d\x2e\x23\xe4\x4a\xfd\x7b\x75\x8a\x08\xa1\x11\xc9\xcb\x9a\x65\x8e\x86\x23\x88\xb3\x0c\x9a\xbe\x2b\x63\xe8\x17\xf3\x72\xd6\x70\x34\xc5\x52\x5e\x5b\x4b\xfb\xbf\x6b\x31\x63\xc5\x7c\x0c\x28\x83\x46\x34\x26\xf4\x95\x40\x22\x4e\x0c\x11\x79\xfd\xfb\x94\x1f\xd7\x53\x46\x43\x83\xa8\xcb\xb9\x9b\x3f\x6e\xe0\x43\x9a\x91\x75\x3b\xd6\xff\x4e\x01\xca\xe8\xfc\x0a\x28\xeb\xb1\xce\xf0\xa6\x48\xcd\x
04\xd0\xcf\x87\x21\x8c\x7a\x88\x05\xb4\x79\x13\xe8\x31\x7b\x68\xdd\x5e\x13\x43\xcb\xdd\xe4\x83\x09\x0d\xe1\xc1\x95\xba\x31\x98\x50\xcc\x2d\x36\x88\x02\x7c\xe9\x39\x46\xd2\x27\xcc\x89\x9f\x02\x8f\x19\x19\x66\xe2\x69\x8a\x15\x62\x80\x80\x1f\xbd\xb8\x28\x1b\xa4\x41\xe2\x26\x24\xf4\xf0\xdc\xfb\xc8\xed\x30\xc2\x64\xae\xdc\x6d\x2c\xbe\xf2\xf6\xfc\x2f\x92\xcb\x65\xed\x8c\x76\x08\xbb\x3d\x5e\x63\x00\x30\x4c\x9e\x99\x1a\xe3\xf6\x9e\x18\x2c\x3e\xb5\x70\x4c\x67\x85\xf7\x44\x58\x13\x55\x52\xa2\xcd\x6d\xdf\x7b\x78\xe5\x5f\x2d\xfc\x11\x3e\xee\xb0\x64\x2c\xa1\x2b\xb3\x74\xd9\x9c\xc1\xd7\x10\xfc\x0b\xb9\x84\xe2\x98\xd3\x76\xbf\x49\x10\xe9\xe8\xd4\xa3\x04\x1b\xa8\xf5\x91\xe1\x26\x5a\xcb\x4e\x02\x12\x01\xfc\xd2\x67\xc5\xd1\xb8\x36\xa6\xee\x97\x8a\x86\x70\x16\xbd\xd0\x24\x2c\x7f\xf4\x0f\x55\x3a\xa6\x64\xe5\x9a\x6d\x65\x39\x72\xb9\x53\x48\xc2\x58\x80\xa1\x18\x83\x3d\x75\xff\xbc\xa6\x6a\x92\x51\x25\x0a\x71\x18\xf2\x40\x9b\xbd\x49\x57\xdf\x58\xd8\x0d\xbd\x01\xc0\x38\xb3\xb7\x25\xcc\x99\x85\x47\x01\xd7\xee\xba\x60\x18\xf2\x0d\x44\x57\x8c\xb5\xb5\xce\x43\xc4\xe9\x48\xc8\x76\x3a\x50\xb4\x63\xc6\x16\xb3\xaf\x51\x3d\x1c\x80\x63\x95\xf7\xe3\xf6\x35\x80\x71\xe9\x29\x39\x00\x56\xfe\x2a\x43\x9a\x84\x15\x65\x7d\x74\x7b\x26\xca\x14\x4e\xfa\x1b\x50\x39\xb7\xb4\x03\xb3\x10\x4f\xcc\xe8\x1d\xbb\x06\x8c\x6f\x86\xbe\x70\xbc\xce\xa7\xea\x9b\xf1\x60\xcd\xa6\x0d\x5f\xc0\x9b\xc1\x3a\xec\x44\x55\x23\x61\x80\x46\x7f\x15\x7b\x8e\x4d\xeb\xec\x03\xf6\x7a\x34\x1e\x86\x6e\xdb\xd6\x35\xf9\x92\xac\x69\xa2\xa7\x87\xa4\x01\x36\x92\xb0\x5c\x7d\x3e\xa3\xa7\x56\x09\x2e\x61\xfb\xf8\x59\xa7\x8a\xee\xf7\x0e\x97\xf9\x56\x85\x9a\xd6\xb9\x20\x26\x85\x3a\xbe\xc1\x7f\xa7\x9f\x03\x4b\xff\x82\xb5\xd4\xf2\x93\xd8\x58\x65\xa7\xf8\x64\x39\x84\x50\xcf\x34\x48\xab\x65\xb2\xbc\x49\x60\xd4\x7a\x25\xc0\x6b\xb9\x56\xa3\xdf\x01\x55\x01\x73\x68\x51\xbc\x95\x7f\xff\xab\xfe\x9d\xdb\x2e\x4a\x77\x25\x63\x30\xf6\x0d\xff\xc1\xf0\xef\x53\x63\x04\x5a\xf1\xb9\x47\x17\xf1\x26\x86\x12\xb0\x39\xd6\x2c\x19\x46\x8f\xcd\xd0\x14\xb3\x0b\xe3\x53\x10\x37\x69\x
13\x63\x2e\x48\x42\x32\x8a\x83\x6c\xdb\xe0\x65\x63\x13\xb7\x6e\x54\x53\x0c\xa8\xea\x33\x2f\x67\x7b\xd6\xba\x98\xb8\xbf\x3a\xc1\x7d\x5f\x5d\x02\x83\x91\x2a\xd7\xfb\xd0\xeb\xb8\x9c\x8c\x43\xf8\x4d\xe6\x84\xc8\x09\x03\xf0\x6d\x55\x19\x29\x53\x7d\xd5\x42\x71\xb8\x70\xe9\x74\x73\x7f\xe6\x33\x5a\xc1\x38\x94\x13\x9d\xc2\xc0\x5b\x4f\xc9\x76\x80\x01\x26\xfe\x65\xcc\x04\x5b\xf9\x85\x6b\xf4\x83\xe9\xc5\x73\x4b\x7b\x52\x0a\x8e\x5f\x27\x78\xc8\x9c\x96\x11\xd0\x89\x88\x0a\x1d\x5d\x6a\xd8\xc5\xed\x1a\xda\x9a\xd0\x9f\xe0\x1a\x20\x0b\xcf\xea\x68\x3b\x22\x6b\x5b\xb5\xfc\x51\x6f\x38\xf0\x54\x33\xaa\x38\x12\x9b\x34\x1c\x40\x09\xf9\xb3\x1c\xe9\x59\x73\x35\xdb\x0c\x31\x2e\x44\xd5\x4f\xef\x5a\xdd\xa9\x31\x99\x37\x7b\xdf\x8b\xdb\x50\x76\x4e\x47\xd2\x9c\xed\x45\xa2\x96\x84\x8f\xce\x53\x79\xca\x65\xb1\xa4\xfb\x69\xbe\x1b\xbf\xb2\x5a\x12\x05\x51\x2b\x50\xb3\xe4\x8e\xc9\xdf\x7d\x64\xa8\x1c\x30\x24\xec\xf0\x2c\x0b\xc2\x40\x0a\x82\xe2\x32\xba\xeb\x34\xf5\x16\xe1\x67\x71\x7b\xc7\x43\x3f\xc7\x32\x05\xe9\x4c\x59\x4c\x97\x83\x37\x55\xaa\xe9\x15\xa1\x4a\xde\x5c\x28\xa3\xc9\xdd\x0a\xb3\x0f\xf7\x6c\x65\x1c\x44\x60\xe6\xaa\xb2\xbf\xe6\x89\x66\xa4\xa9\x5c\x4f\xb4\x83\x35\x5e\xf6\xb7\x14\xcc\xbe\xd9\xef\x63\x57\x2c\x99\xe3\x0c\x3e\xe6\x1d\x68\x5b\xb5\xa9\xaf\xed\x74\xfc\x4a\x3f\xed\x73\x7e\x83\x9a\x9c\xf6\x11\x38\xee\x14\x35\xa7\x3b\xb8\xf1\x5b\x9c\x66\x49\x61\x1c\x58\x72\x5a\xea\xce\x45\x0e\x7d\x64\xdc\xf8\x55\xaf\xd0\xdc\x52\xaa\xfb\x04\xde\x2f\x78\x58\xea\xd3\x1a\xdc\xc8\x91\xfd\x76\x36\xb6\x73\x10\xe5\xcd\xed\x1f\xba\x30\x03\x85\xb7\xcf\x57\xf3\x9c\xa3\xe0\x7f\xbb\x04\xed\x99\x65\xd6\x50\xd6\xe2\xba\x32\xfd\x5b\x73\xcc\xf1\xdb\xb5\x42\x08\xcc\x4d\x55\x7f\x50\x7f\x7c\x4c\xbb\x56\x1c\x3f\xb7\x85\xc6\x6c\x0a\x96\x92\x69\x70\xed\xb1\x91\x2e\xa1\x1f\xc8\xdd\xb6\x91\x01\x47\xcb\xb0\xdf\x35\xea\xf6\x61\xbc\xe9\x57\x5d\x0b\x1e\xc2\xf5\x3f\x1c\x55\xae\xdb\xd2\x7d\xa2\x76\x2f\xc7\xbd\xa7\xbd\x44\x70\xf1\xd4\x6d\x24\x14\xc4\xba\x09\x72\x25\x54\xb0\xcf\x14\x0a\x97\xb8\xb4\x61\x26\x43\xcf\x48\xba\x38\x9a\xe4\x5a\x
02\xe0\x7e\x83\x2b\x8e\x13\xf7\x86\x60\x6f\x67\x5c\x4e\xaf\x0c\x9a\x3e\x01\x29\x7d\xbf\x29\x32\xd5\x3c\x06\xa6\x7a\x60\x56\x77\x56\x64\x19\x01\x70\x43\x64\x0d\x5f\xc1\x8d\xea\x9a\xa5\xf8\x4c\xc9\xe3\x01\xcc\xc7\x33\xfa\xc2\x16\x55\xf5\x4e\x37\x13\xcf\xb7\x15\x03\xe2\xe5\xc3\xe9\x22\xde\xd4\xfe\xc1\x5c\x82\x22\x69\x67\xa4\x72\xba\x34\xfd\xab\xe5\x8e\x65\x19\x73\x35\x93\xd1\x93\xd5\x93\x9b\xcb\x44\x50\xb2\x08\xb2\xe3\x23\x05\x72\xe2\x61\x2b\x8d\xd8\xdd\xa9\x53\x5f\xe0\x25\x47\x0f\xca\x2e\x94\x4f\xe7\x99\xaf\x7d\x86\xc2\x4d\x8b\x81\x19\xc6\x4d\x4b\xc2\xd8\xdb\x0e\xfc\xac\xd0\x28\xeb\xe8\xe3\xab\xf2\x49\x9e\x7e\xe2\x5a\x94\xe7\xb4\x74\x1d\xe1\x28\xb7\x66\x54\xc3\x5f\x2a\xda\xad\xa9\xa9\xe5\x23\xbb\xdd\x51\xd3\x97\xa2\xcf\xa5\xea\xa7\xb4\x3e\x6f\x09\xc5\xe4\x77\xa0\xba\xe1\xc2\x35\xb0\xd8\x7b\xd8\x27\xd2\x98\x30\xcd\xda\xb2\x9c\xf7\xac\xbd\x88\xfc\xe2\x1d\xce\x01\xe0\x4f\xbd\x1a\xfa\xd3\xe0\x52\x36\xe2\x02\x1d\xbe\x1a\x0d\x01\x39\x7a\xb7\xb1\xde\x08\xf7\xcc\xe9\x96\x76\x29\xb4\x0a\xf5\x53\xe8\x54\xd3\x61\xc1\x6c\x45\xfa\x5f\xdc\x78\x3a\x5e\x77\x23\x74\xa7\xa5\xb5\xa9\xe5\xd9\xdf\xb2\xe2\xf7\xe5\xce\x82\xd2\x8e\x37\xa6\x8b\x57\x47\xe5\xe4\x0f\xa9\x7d\xa7\x26\xa9\x41\x77\x63\x94\x70\x84\x13\xfe\x28\x16\xaa\x9f\xcc\x43\xe0\x5e\x19\xf9\xa8\x72\x52\x47\xc0\xc8\xff\x0e\x82\xc3\x15\x54\x14\x53\x12\x77\x14\x0d\xb6\xf5\xe4\xba\x91\x14\xab\x1a\x63\x51\xf7\xf0\x3a\x22\x1f\x8b\xac\xc6\xaa\x0d\xa2\xd2\xf5\x31\xf9\x23\x2d\xa7\x88\x23\x0a\x6d\x71\xba\x6c\xd9\xf6\x96\x00\xc4\x10\x9b\x64\xc5\x78\x50\xc6\xd9\xf3\xd8\x06\x97\x68\xc2\x8c\xd2\xda\x5b\x8a\x3f\xc1\xa6\x56\x17\x3f\x02\x7e\xd0\xcb\xe5\x94\x2b\xaf\x0f\xe4\xd1\x31\xb4\x87\x7e\x6c\x72\x7b\x7a\xbd\x4b\x52\xaf\x0e\x1d\x87\x41\xd3\xdd\x2c\x9f\x0b\x6b\x25\x54\x6e\xb2\x5b\xd3\x7e\xab\xf7\x5c\x4e\x2e\x22\x30\xd9\x90\x56\x88\x15\x5f\xef\x64\xbe\xc7\xe5\xa6\xf0\xfb\x60\x96\xbb\xeb\x13\x7a\x95\x01\xca\x11\x61\x2b\xea\x09\x1e\x35\x4e\xef\xe6\x40\xd5\xc2\x18\x4b\x79\xf5\xd7\x35\xa9\x01\xea\x74\x07\xc9\x2e\xb0\x3a\x85\xb4\x56\x15\x01\xd5\xcc\x
28\x54\xc9\x19\xed\x66\x41\x72\x4f\x81\x7c\xc9\xef\x26\xaf\xd1\x67\x1c\xf9\x24\x23\x21\x31\xa1\x74\xe8\x07\x09\xbe\xcb\xa1\xd6\x48\xf2\x5c\x46\x4f\xed\x77\xc9\x58\x85\x7c\x66\xf6\x06\xb0\x6f\x2b\x67\xbc\x7a\xbd\x0e\xe6\x52\x86\xc2\x97\xe1\x5a\xf9\xae\xbb\xbf\xaa\x98\xe1\x09\xb4\x10\xd3\x64\xb7\x11\xe4\x29\x88\xd6\xc5\x6f\x9a\x53\x20\x4f\x29\x2e\x68\x73\x92\xd4\xea\xcb\x3e\x0d\x02\x78\x71\xc2\xc3\x2f\xe9\x05\x7b\x9c\xee\xa2\x02\x01\x9b\xc3\xdc\x97\x8d\x9c\x5f\xf3\xbb\xc8\x82\x97\x3e\x5a\x2c\xae\x7a\xcb\xcd\x1a\x88\x91\xc0\x2d\xdd\x64\x07\x4f\x2e\x77\x92\x93\xdf\x98\xce\xd4\xad\x7a\x16\x55\xc6\xad\xf0\x39\xfc\xb2\xdd\x98\x9c\x72\x45\x47\x09\xe9\x6b\x95\x2a\x8a\x6a\xa5\xd0\x7a\x1c\x26\x07\x56\x20\x9e\x37\xf9\xad\x89\xc1\xbe\xfb\xf6\x09\x6b\x36\xdf\x60\xb9\x8b\xe2\x88\xa3\x4b\x93\x92\x9e\x1e\x96\xbd\x15\x3b\x37\x60\x1f\x10\x1c\x5c\xa7\x8e\x1e\x33\xf5\x71\xb4\xdf\x72\xc5\x86\x65\xc0\x5a\xfd\x54\x27\x37\x14\x19\xa4\xbd\xb6\xc8\x8b\xff\x7b\xea\x75\x1c\xe5\x48\x1d\x03\x76\x03\x99\x62\xc9\xd9\x61\xa2\x89\x67\x9f\x6d\x53\x71\xe8\xec\x9a\x35\xa3\xf0\x69\xf6\x5c\xa8\xef\xcc\x6b\x6b\x8f\x0b\x2a\x97\x4d\x39\xcb\xca\x52\x97\x3b\x62\x0c\x85\x49\x55\x49\xc8\xdb\xb8\x98\xbd\x1b\x97\x4a\xc6\xb6\x5d\x5d\x99\xc5\x8a\xf2\x48\xc8\xf7\xd6\x7b\x2e\xae\xc8\x60\x97\x23\x93\x56\x26\x23\xfe\xab\x92\x3c\x77\xdd\xd9\x1c\x0d\x2d\x43\x7d\xb9\xaf\x28\x1a\x5d\xc5\x8e\x39\x9c\x2b\xa5\x6e\x4c\x14\x84\xd7\x5a\x10\xb1\x40\x1f\xee\x54\x2c\x95\x11\xa7\x8a\x79\x99\x82\xb4\xbe\xe3\xb5\xcb\xfa\x28\xb1\x58\x16\x86\x14\x8d\x2f\xa2\x9a\x34\xda\x21\x1f\xb7\xa8\xb0\x8e\x93\xaa\xe4\x60\xa7\x76\xbf\x64\xd1\x62\xe5\x98\x2c\x21\x1b\xb5\xb4\xb9\x38\x4e\x97\x33\x88\x2b\xa8\xcd\x91\xa7\x21\x92\xf9\xb8\x4b\xf0\x2c\x9c\x12\xff\x62\x37\xe6\x19\xac\x66\x40\x97\xef\x8a\x7f\x85\x85\xc0\xd0\x6b\x50\xb9\x0d\x1b\x97\xbc\xca\xf3\xee\x0b\x3a\x9a\x92\xf6\x96\x7a\x4d\x64\x89\xb0\x18\xeb\x03\xbd\x09\x14\x8d\x76\x91\x41\x95\x99\x9a\xcb\x44\x8b\xa6\xf3\x3c\xa6\xb4\xf2\xfa\x98\x02\xbe\x26\xc7\xae\xdb\x38\x7e\x95\x66\xb9\x3c\x46\xa1\xae\x
17\xfa\xd3\x4f\xcb\x46\xf1\x86\x43\x78\x32\x32\x6c\x16\xc7\xa6\x19\x08\x38\xbe\xc7\x14\x4d\xdd\xc3\x72\x33\x48\xe1\xb7\x4e\x6c\xd0\xce\xfd\xe5\xb3\x05\xf9\x29\xbf\xdd\x3f\x57\xa8\x5a\xa5\x88\x0a\x57\xca\xe0\x9b\x8b\x15\x7c\x19\xcf\x68\x6c\x34\xe9\xe2\xa5\x54\xe6\x51\x9d\x71\x3f\xc7\x37\xd1\x8f\xbe\xcb\x98\x1c\x2f\xcb\x99\x00\x09\x0e\x9a\x7b\xd5\x22\xa6\x34\xad\xe1\x36\xe3\x61\x15\x2e\x04\x5a\x4b\x1f\x01\x03\x70\x99\xb4\x93\xa2\xbe\x6e\xb5\x8d\x28\x8d\x56\x08\x1a\xea\xed\x68\x15\xfb\x36\x90\xcc\xc1\x14\x8c\x88\xf3\x57\xbb\xb9\xe0\x06\x79\xeb\xc1\xbf\x88\x4c\x2a\xbf\x52\x7b\xfc\x0e\x21\xfa\x1d\xee\xfc\x09\xfc\xb3\x33\x59\x41\x50\xc2\x10\x5d\xa4\x7a\x2c\xc1\x62\x4b\x51\x71\x5f\x58\xe9\x58\xfb\x28\x2d\x2a\x98\x33\xa4\x8f\x4b\xc4\xbd\xac\xc3\x9c\x62\xaa\x5d\xe6\x22\xbb\x75\x5d\xdf\xe4\x3b\xb4\x19\x0d\xbf\x35\x9d\x3f\x46\x1f\x65\x02\x09\x18\x90\x41\xcd\x4d\xe4\x34\xb3\x3e\x8a\x49\x6e\x74\xd1\xc7\x6b\xef\x87\x21\xec\x1e\xe9\xc9\x0a\x1a\xab\xa8\x46\x80\xd8\x9b\xcd\x10\x43\x50\x37\xac\xb3\x2f\x2a\x1a\xa2\xa8\xc9\x17\xb5\x55\xe7\x80\xe2\x83\x80\x87\x05\x6d\xd1\x7b\x0f\x2e\x08\xbd\x87\x36\x42\xd8\x40\x9e\x14\xdd\xb7\x14\xd6\x45\xaa\xc2\x0d\x0b\xc9\x42\x98\x75\xfd\x73\x5b\x7d\x04\x6b\xa6\xf2\x37\x7d\x27\x0b\x77\x1f\xc3\x9c\x0f\xbc\x4e\xd3\x8b\x26\x53\x25\xd2\x9b\x52\xb3\xc9\x62\xec\x83\x31\x60\x6f\x73\x28\x5e\x36\xfe\xd1\xd0\x99\xec\xb2\x0f\xf3\xab\xab\x00\x05\x55\x1f\x2c\xb3\x9e\x0a\x98\xf1\xfc\x1d\x5b\x09\x93\x27\x9f\xa5\x3a\xcf\xfd\xff\xc7\xc7\x3e\x19\x44\xcd\x42\x96\xad\xf2\x36\x50\xde\x61\xda\x1e\x23\x24\xc2\x41\x2b\x00\x39\x3f\x68\x72\x30\x22\x75\x11\x32\x57\x12\x79\x2f\x2a\xd8\x34\xea\xa9\x16\x79\x68\xed\x71\x42\x2c\x1f\x6e\x6a\x6a\x6a\x2b\x3a\x3e\x5d\x9c\xff\xfc\xb1\xa6\x99\x58\x24\x5f\x0c\x41\x14\x1e\x19\xa3\x17\x5b\xf0\x15\x1b\x0c\x4a\x3a\x61\x3b\x07\x09\x59\x04\x13\x4d\x39\xca\xb9\xa6\x81\x7d\xfe\x73\x36\x6a\xcf\xcb\x88\x63\xc1\x71\x6f\x3d\x44\xf9\xa9\xab\x72\xb8\x00\x9b\x25\x5d\x63\xe3\xbc\x60\x16\x57\xd2\xfc\xd1\xeb\x9a\x12\x72\x31\xce\x97\x5e\x05\xff\x98\x34\x7b\x
3c\xc1\x07\xfb\x03\x2a\xf1\xfc\xb5\x33\x4c\x79\xc8\x2e\xc3\xff\x45\x17\x96\xc9\x58\x50', 2)
| 36,091
| 36,091
| 0.749993
| 9,018
| 36,091
| 3.000222
| 0.028942
| 0.004879
| 0.005322
| 0.004879
| 0.002218
| 0.001663
| 0.001663
| 0
| 0
| 0
| 0
| 0.313401
| 0.000083
| 36,091
| 1
| 36,091
| 36,091
| 0.436322
| 0
| 0
| 0
| 0
| 1
| 0.998892
| 0.998892
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
1f4e9ae0eaec41b60428b1f669b2a4a577ed8820
| 201
|
py
|
Python
|
tests/tokens/docstring1.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 31
|
2022-01-07T23:56:33.000Z
|
2022-03-29T16:09:02.000Z
|
tests/tokens/docstring1.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 197
|
2021-12-29T19:01:41.000Z
|
2022-03-31T15:58:25.000Z
|
tests/tokens/docstring1.py
|
akshanshbhatt/lpython
|
70fef49dbbb6cbb0447f7013231171e5c8b8e5df
|
[
"BSD-3-Clause"
] | 17
|
2022-01-06T15:34:36.000Z
|
2022-03-31T13:55:33.000Z
|
def test1():
"""A multi-line
docstring.
"""
def test2():
"""
A multi-line
docstring.
"""
def test2():
""" A single-line docstring."""
"""
A multi-line
docstring.
"""
| 10.578947
| 35
| 0.502488
| 22
| 201
| 4.590909
| 0.363636
| 0.514851
| 0.29703
| 0.564356
| 0.544554
| 0.544554
| 0.544554
| 0
| 0
| 0
| 0
| 0.021127
| 0.293532
| 201
| 18
| 36
| 11.166667
| 0.690141
| 0.358209
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
2f1cf17a05fadbc357182fd1b89bb7ff0b3b85b4
| 51,015
|
py
|
Python
|
model/train_utils.py
|
statsu1990/kaggle_google_quest_qa
|
4b3569aa6d8b58d2315301a3ad86e6ed1d71c6db
|
[
"MIT"
] | 3
|
2020-02-13T02:11:02.000Z
|
2021-09-05T13:15:34.000Z
|
model/train_utils.py
|
statsu1990/kaggle_google_quest_qa
|
4b3569aa6d8b58d2315301a3ad86e6ed1d71c6db
|
[
"MIT"
] | null | null | null |
model/train_utils.py
|
statsu1990/kaggle_google_quest_qa
|
4b3569aa6d8b58d2315301a3ad86e6ed1d71c6db
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.optim.lr_scheduler import _LRScheduler
from torchcontrib.optim import SWA
from tqdm import tqdm
from scipy.stats import spearmanr
import pandas as pd
import numpy as np
import warnings
warnings.simplefilter('ignore')
def save_log(loglist, filename):
    """Write the per-epoch training log to a CSV file.

    Args:
        loglist: list of per-epoch rows; normally
            [epoch, train_loss, train_score, test_loss, test_score],
            but train_model_v0 logs 3-wide rows [epoch, train_loss, test_loss].
        filename: destination CSV path (overwritten).
    """
    df = pd.DataFrame(loglist)
    headers = ['epoch', 'train_loss', 'train_score', 'test_loss', 'test_score']
    # Only rename when the row width matches: assigning 5 names to the 3-wide
    # rows produced by train_model_v0 raises ValueError in pandas.
    if len(df.columns) == len(headers):
        df.columns = headers
    df.to_csv(filename)
def compute_spearmanr(original, preds):
    """Mean Spearman rank correlation across the 30 target columns.

    Per-column correlations that come out NaN (e.g. a constant column)
    are ignored via ``np.nanmean``.

    Args:
        original: (n_samples, 30) array-like of ground-truth targets.
        preds: (n_samples, 30) array-like of predictions.

    Returns:
        float: mean of the non-NaN per-column Spearman correlations.
    """
    # Defect fixed: removed leftover debug print(scores) and dead
    # commented-out accumulation code.
    scores = [spearmanr(original[:, i], preds[:, i]).correlation
              for i in range(30)]
    return np.nanmean(scores)
class WarmUpLR(_LRScheduler):
    """Linear learning-rate warmup scheduler.

    Ramps each parameter group's LR from 0 up to its base LR over
    ``total_iters`` steps (intended to be stepped once per batch).

    Args:
        optimizer: wrapped optimizer (e.g. SGD).
        total_iters: number of warmup iterations.
        last_epoch: index of the last step (-1 starts fresh).
    """

    def __init__(self, optimizer, total_iters, last_epoch=-1):
        self.total_iters = total_iters
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        """Scale every base LR by last_epoch / total_iters (epsilon-guarded)."""
        lrs = []
        for base_lr in self.base_lrs:
            # 1e-8 guards against division by zero when total_iters == 0.
            lrs.append(base_lr * self.last_epoch / (self.total_iters + 1e-8))
        return lrs
def pairwise_bce_logit_loss(outputs, targets):
    """Pairwise ranking loss: BCE-with-logits on random-pair differences.

    outputs: logits
    """
    n = outputs.size()[0]
    if n < 3:
        # Too few rows for a useful shuffle: pair each row with its mirror.
        idx = torch.from_numpy(np.arange(n, dtype=np.int64)[::-1].copy()).cuda()
    else:
        idx = torch.randperm(n).cuda()
    logit_diff = outputs - outputs[idx]
    # Map the target difference from [-1, 1] into [0, 1] for BCE.
    target_diff = (1 + (targets - targets[idx])) / 2
    return nn.BCEWithLogitsLoss()(logit_diff, target_diff)
def pairwise_l1_logit_loss(outputs, targets):
    """Pairwise L1 loss between sigmoid-probability differences of random pairs.

    outputs: logits
    """
    n = outputs.size()[0]
    if n < 3:
        # Too few rows for a useful shuffle: pair each row with its mirror.
        idx = torch.from_numpy(np.arange(n, dtype=np.int64)[::-1].copy()).cuda()
    else:
        idx = torch.randperm(n).cuda()
    probs = torch.sigmoid(outputs)
    return nn.L1Loss()(probs - probs[idx], targets - targets[idx])
def pairwise_l1_loss(outputs, targets):
    """Pairwise L1 loss on raw output differences for random pairs."""
    n = outputs.size()[0]
    if n < 3:
        # Too few rows for a useful shuffle: pair each row with its mirror.
        idx = torch.from_numpy(np.arange(n, dtype=np.int64)[::-1].copy()).cuda()
    else:
        idx = torch.randperm(n).cuda()
    return nn.L1Loss()(outputs - outputs[idx], targets - targets[idx])
def mseloss(outputs, targets):
    """Mean squared error between outputs and targets."""
    squared_error = torch.pow(torch.sub(outputs, targets), 2)
    return torch.mean(squared_error)
def wrapper_comb_point_pair_loss(pointwise_lossfunc, pairwise_lossfunc, pair_weight=1.0):
    """Build a loss blending a pointwise and a pairwise loss.

    The returned callable computes
    ``(1 - pair_weight) * pointwise + pair_weight * pairwise``.

    Args:
        pointwise_lossfunc: callable (outputs, targets) -> loss.
        pairwise_lossfunc: callable (outputs, targets) -> loss.
        pair_weight: blend factor in [0, 1]; 1.0 keeps only the pairwise term.
    """
    def comb_point_pair_loss(outputs, targets):
        pointwise = pointwise_lossfunc(outputs, targets)
        pairwise = pairwise_lossfunc(outputs, targets)
        return (1 - pair_weight) * pointwise + pair_weight * pairwise
    return comb_point_pair_loss
def train_model_v0(net, trainloader, validloader, epochs, lr, warmup_epoch=1, milestones=[5, 10], gamma=0.2):
    """Train `net` with BCE-with-logits loss, SGD (momentum 0.9), per-batch
    linear LR warmup, then MultiStep LR decay. No gradient accumulation,
    no Spearman scoring (see train_model_v1 for those).

    Args:
        net: model taking (ids, masks, segments) and returning logits.
        trainloader / validloader: iterables yielding (ids, masks, segments, targets).
        epochs: number of training epochs.
        lr: base learning rate.
        warmup_epoch: epochs of linear warmup (warmup stepped once per batch).
        milestones: epochs at which the LR is multiplied by `gamma`.
        gamma: multiplicative LR decay factor.

    Returns:
        The trained network (moved to CUDA).

    NOTE(review): `milestones=[5, 10]` is a mutable default argument (never
    mutated here, but fragile).
    NOTE(review): loglist rows here are 3 wide, but save_log assigns 5
    column names — save_log will raise unless it guards the width; verify.
    """
    net = net.cuda()
    criterion = nn.BCEWithLogitsLoss()
    #optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch; returns (epoch, mean batch loss).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(ids, masks, segments)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            print(loss.item())  # per-batch loss (verbose)
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f' % (train_loss/(batch_idx+1),))
        return epoch, train_loss/(batch_idx+1)

    def test(epoch):
        # One validation pass (no gradients); returns (epoch, mean batch loss).
        net.eval()
        test_loss = 0
        with torch.no_grad():
            for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(validloader)):
                ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
                outputs = net(ids, masks, segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
        print('Vali Loss: %.3f, ' % (test_loss/(batch_idx+1), ))
        return epoch, test_loss/(batch_idx+1)

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls = train(epoch)
        ep, ts_ls = test(epoch)
        loglist.append([ep, tr_ls, ts_ls])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_v1(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2):
    """Train `net` with BCE-with-logits loss and Adam, gradient accumulation,
    per-batch LR warmup, MultiStep LR decay, and Spearman scoring per epoch.

    Args:
        net: model taking (ids, masks, segments) and returning logits.
        trainloader / validloader: iterables yielding (ids, masks, segments, targets).
        epochs: number of training epochs.
        lr: base learning rate.
        grad_accum_steps: optimizer steps once every N batches (loss scaled by 1/N).
        warmup_epoch: epochs of linear warmup (warmup stepped once per batch).
        milestones, gamma: MultiStepLR decay schedule.

    Returns:
        The trained network (moved to CUDA).

    NOTE(review): `milestones=[5, 10]` is a mutable default argument (never
    mutated here, but fragile).
    """
    net = net.cuda()
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(net.parameters(), lr=lr)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch; returns (epoch, mean loss, spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
            outputs = net(ids, masks, segments)
            loss = criterion(outputs, targets)
            loss = loss / grad_accum_steps  # scale so accumulated grads average out
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One validation pass (no gradients); returns (epoch, mean loss, score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(validloader)):
                ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
                outputs = net(ids, masks, segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_v2(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2):
    """Same training loop as train_model_v1 but with L1 loss instead of
    BCE-with-logits (Adam optimizer, gradient accumulation, warmup, decay).

    Args:
        net: model taking (ids, masks, segments) and returning predictions.
        trainloader / validloader: iterables yielding (ids, masks, segments, targets).
        epochs, lr, grad_accum_steps, warmup_epoch, milestones, gamma: see train_model_v1.

    Returns:
        The trained network (moved to CUDA).
    """
    net = net.cuda()
    criterion = nn.L1Loss()
    optimizer = optim.Adam(net.parameters(), lr=lr)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch; returns (epoch, mean loss, spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
            outputs = net(ids, masks, segments)
            loss = criterion(outputs, targets)
            loss = loss / grad_accum_steps  # scale so accumulated grads average out
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One validation pass (no gradients); returns (epoch, mean loss, score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(validloader)):
                ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
                outputs = net(ids, masks, segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_v3(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2):
    """Same training loop as train_model_v2 (L1 loss) but with SGD
    (momentum 0.9) instead of Adam.

    Args:
        net: model taking (ids, masks, segments) and returning predictions.
        trainloader / validloader: iterables yielding (ids, masks, segments, targets).
        epochs, lr, grad_accum_steps, warmup_epoch, milestones, gamma: see train_model_v1.

    Returns:
        The trained network (moved to CUDA).
    """
    net = net.cuda()
    criterion = nn.L1Loss()
    #optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch; returns (epoch, mean loss, spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
            outputs = net(ids, masks, segments)
            loss = criterion(outputs, targets)
            loss = loss / grad_accum_steps  # scale so accumulated grads average out
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One validation pass (no gradients); returns (epoch, mean loss, score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(validloader)):
                ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
                outputs = net(ids, masks, segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_v4(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2):
    """Same loop as train_model_v2 (L1 loss, Adam) but for models whose
    forward returns a tuple: only the first element is used as predictions.

    Args:
        net: model taking (ids, masks, segments) and returning (outputs, ...).
        trainloader / validloader: iterables yielding (ids, masks, segments, targets).
        epochs, lr, grad_accum_steps, warmup_epoch, milestones, gamma: see train_model_v1.

    Returns:
        The trained network (moved to CUDA).
    """
    net = net.cuda()
    #criterion = nn.BCEWithLogitsLoss()
    criterion = nn.L1Loss()
    optimizer = optim.Adam(net.parameters(), lr=lr)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch; returns (epoch, mean loss, spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
            outputs, _ = net(ids, masks, segments)  # second tuple element discarded
            loss = criterion(outputs, targets)
            loss = loss / grad_accum_steps  # scale so accumulated grads average out
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One validation pass (no gradients); returns (epoch, mean loss, score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(validloader)):
                ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
                outputs, _ = net(ids, masks, segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_v5(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2):
    """Same loop as train_model_v4 (tuple-returning model, Adam) but with
    BCE-with-logits loss instead of L1.

    Args:
        net: model taking (ids, masks, segments) and returning (logits, ...).
        trainloader / validloader: iterables yielding (ids, masks, segments, targets).
        epochs, lr, grad_accum_steps, warmup_epoch, milestones, gamma: see train_model_v1.

    Returns:
        The trained network (moved to CUDA).
    """
    net = net.cuda()
    criterion = nn.BCEWithLogitsLoss()
    #criterion = nn.L1Loss()
    optimizer = optim.Adam(net.parameters(), lr=lr)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch; returns (epoch, mean loss, spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
            outputs, _ = net(ids, masks, segments)  # second tuple element discarded
            loss = criterion(outputs, targets)
            loss = loss / grad_accum_steps  # scale so accumulated grads average out
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One validation pass (no gradients); returns (epoch, mean loss, score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (ids, masks, segments, targets) in enumerate(tqdm(validloader)):
                ids, masks, segments, targets = ids.cuda(), masks.cuda(), segments.cuda(), targets.cuda()
                outputs, _ = net(ids, masks, segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_sepQA_v1(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2, l2=0.0):
    """Training loop for models with separate question/answer inputs:
    BCE-with-logits loss, AdamW with weight decay `l2`, gradient
    accumulation, per-batch warmup and MultiStep LR decay.

    Args:
        net: model taking (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            and returning (logits, ...).
        trainloader / validloader: yield 7-tuples
            (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets).
        epochs, lr, grad_accum_steps, warmup_epoch, milestones, gamma: see train_model_v1.
        l2: AdamW weight_decay coefficient.

    Returns:
        The trained network (moved to CUDA).
    """
    net = net.cuda()
    criterion = nn.BCEWithLogitsLoss()
    #criterion = nn.L1Loss()
    #optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=l2)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch; returns (epoch, mean loss, spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
            a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
            outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            loss = criterion(outputs, targets)
            loss = loss / grad_accum_steps  # scale so accumulated grads average out
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One validation pass (no gradients); returns (epoch, mean loss, score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(validloader)):
                q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
                a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
                outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_sepQA_v1_1(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2, l2=0.0):
    """Duplicate of train_model_sepQA_v1 (BCE-with-logits + AdamW for
    separate question/answer inputs); only the commented-out alternative
    criteria differ. Kept as a separate experiment entry point.

    Args:
        net: model taking (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            and returning (logits, ...).
        trainloader / validloader: yield 7-tuples
            (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets).
        epochs, lr, grad_accum_steps, warmup_epoch, milestones, gamma: see train_model_v1.
        l2: AdamW weight_decay coefficient.

    Returns:
        The trained network (moved to CUDA).
    """
    net = net.cuda()
    criterion = nn.BCEWithLogitsLoss()
    #criterion = mseloss #nn.MSELoss()
    #criterion = nn.L1Loss()
    #optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=l2)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch; returns (epoch, mean loss, spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
            a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
            outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            loss = criterion(outputs, targets)
            loss = loss / grad_accum_steps  # scale so accumulated grads average out
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One validation pass (no gradients); returns (epoch, mean loss, score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(validloader)):
                q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
                a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
                outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_sepQA_v1_2_mix(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2, l2=0.0):
    """train_model_sepQA_v1 variant with mixup augmentation during training.

    In train mode the model's forward is expected to return
    (outputs, _, mix_idx, mix_rate); targets are blended with the same
    mixing before the loss. Validation uses the plain 2-tuple forward.

    Args:
        net: model taking (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments);
            returns 4-tuple in train mode, 2-tuple in eval mode (per this loop's usage).
        trainloader / validloader: yield 7-tuples
            (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets).
        epochs, lr, grad_accum_steps, warmup_epoch, milestones, gamma: see train_model_v1.
        l2: AdamW weight_decay coefficient.

    Returns:
        The trained network (moved to CUDA).
    """
    net = net.cuda()
    criterion = nn.BCEWithLogitsLoss()
    #criterion = mseloss #nn.MSELoss()
    #criterion = nn.L1Loss()
    #optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=l2)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One training epoch with mixup; returns (epoch, mean loss, spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch linear warmup
            q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
            a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
            # Model performs the input mixing internally and reports which rows
            # (mix_idx) and how much (mix_rate) were mixed.
            outputs, _, mix_idx, mix_rate = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            # Blend targets with the same mixing so loss matches the mixed inputs.
            targets = mix_rate * targets + (1 - mix_rate) * targets[mix_idx]
            loss = criterion(outputs, targets)
            loss = loss / grad_accum_steps  # scale so accumulated grads average out
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                # NOTE: score is computed against the MIXED targets here.
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        # batch_idx is deliberately read after the loop (index of the last batch).
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One validation pass (no mixup, no gradients); returns (epoch, loss, score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(validloader)):
                q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
                a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
                outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # NOTE(review): passing epoch to step() is deprecated in newer torch.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
        save_log(loglist, 'training_log.csv')  # rewritten every epoch
    return net
def train_model_sepQA_v1_3(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2, l2=0.0, tg_indexs=None):
    """Train `net` on separate question/answer inputs with a column-masked BCE loss.

    Loss is BCEWithLogitsLoss applied per target column through MultiLossWrapper,
    restricted to the columns listed in `tg_indexs` (all columns when None).
    Optimization: AdamW (weight decay `l2`), MultiStepLR decay at `milestones`
    with factor `gamma` once warmup is over, and per-batch WarmUpLR stepping
    during the first `warmup_epoch` epochs. Gradients are accumulated over
    `grad_accum_steps` batches per optimizer step. Per-epoch train/validation
    loss and Spearman score are written to 'training_log.csv' via save_log.
    Returns the trained net (left on CUDA).

    NOTE(review): `milestones=[5, 10]` is a mutable default argument; it is not
    mutated here, but a tuple default would be safer.
    """
    net = net.cuda()
    #criterion = nn.BCEWithLogitsLoss()
    criterion = MultiLossWrapper(nn.BCEWithLogitsLoss(), tg_indexs)
    #criterion = mseloss #nn.MSELoss()
    #criterion = nn.L1Loss()
    #optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=l2)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    # Warmup scheduler is stepped once per batch for `warmup_epoch` epochs.
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One pass over trainloader; returns (epoch, mean loss, Spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch LR warmup during warmup phase
            q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
            a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
            outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            loss = criterion(outputs, targets)
            # Scale so accumulated gradients match a single large-batch step.
            loss = loss / grad_accum_steps
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        # Spearman rank correlation over the whole epoch (rank-invariant to the
        # missing sigmoid on the logits).
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One pass over validloader; returns (epoch, mean loss, Spearman score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(validloader)):
                q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
                a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
                outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # Decay LR only after warmup. NOTE(review): passing `epoch` to
            # scheduler.step() is deprecated in recent PyTorch versions.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
    save_log(loglist, 'training_log.csv')
    return net
def train_model_sepQA_v1_4(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2, l2=0.0, tg_indexs=None):
    """Train `net` like v1_3, but with the column-averaging loss wrapper.

    Identical training procedure to train_model_sepQA_v1_3 (AdamW + `l2` weight
    decay, WarmUpLR per batch for `warmup_epoch` epochs, MultiStepLR decay at
    `milestones` with factor `gamma`, gradient accumulation every
    `grad_accum_steps` batches, per-epoch logging to 'training_log.csv').
    The only difference is the criterion: MultiLossWrapper_AllAverage, which
    scores the AVERAGE of the `tg_indexs` output columns against each of those
    target columns with BCEWithLogitsLoss. Returns the trained net.

    NOTE(review): `milestones=[5, 10]` is a mutable default argument.
    """
    net = net.cuda()
    #criterion = nn.BCEWithLogitsLoss()
    criterion = MultiLossWrapper_AllAverage(nn.BCEWithLogitsLoss(), tg_indexs)
    #criterion = mseloss #nn.MSELoss()
    #criterion = nn.L1Loss()
    #optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=l2)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    # Warmup scheduler is stepped once per batch for `warmup_epoch` epochs.
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One pass over trainloader; returns (epoch, mean loss, Spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch LR warmup during warmup phase
            q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
            a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
            outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            loss = criterion(outputs, targets)
            # Scale so accumulated gradients match a single large-batch step.
            loss = loss / grad_accum_steps
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        # Epoch-level Spearman rank correlation of raw outputs vs targets.
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One pass over validloader; returns (epoch, mean loss, Spearman score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(validloader)):
                q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
                a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
                outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # Decay LR only after warmup; `scheduler.step(epoch)` is deprecated.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
    save_log(loglist, 'training_log.csv')
    return net
def MultiLossWrapper_AllAverage(loss_func, tg_indexs):
    """Build a loss that scores the average of selected output columns.

    The returned callable averages ``outputs[:, i]`` over every column index
    ``i`` in ``tg_indexs``, applies ``loss_func`` between that averaged column
    and each selected target column, and returns the mean of those losses.

    Args:
        loss_func: callable taking (prediction, target) 1-D tensors and
            returning a scalar loss (e.g. an ``nn.BCEWithLogitsLoss`` instance).
        tg_indexs: non-empty collection of column indices to include.

    Returns:
        ``LossFunc(outputs, targets) -> scalar loss tensor``.
    """
    def LossFunc(outputs, targets):
        num = outputs.size()[1]
        loss = 0
        ave_output = None
        for i in range(num):
            if i in tg_indexs:
                if ave_output is None:
                    ave_output = outputs[:, i]
                else:
                    # BUGFIX: the original used `ave_output += outputs[:, i]`.
                    # Since `outputs[:, i]` is a view, the in-place add mutated
                    # `outputs` itself, corrupting the logits used by the loss
                    # and by any later metric, and breaking autograd. Use an
                    # out-of-place add so `outputs` is never modified.
                    ave_output = ave_output + outputs[:, i]
        ave_output = ave_output / len(tg_indexs)
        for i in range(num):
            if i in tg_indexs:
                loss += loss_func(ave_output, targets[:, i])
        loss = loss / len(tg_indexs)
        return loss
    return LossFunc
# pair
def train_model_sepQA_v2(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2, pair_w=None, l2=0.0):
    """Train `net` with a combined pointwise + pairwise ranking loss.

    The criterion combines BCEWithLogitsLoss (pointwise) with
    pairwise_l1_logit_loss (pairwise) via wrapper_comb_point_pair_loss, the
    pairwise term weighted by `pair_w` (defaults to 1.0 when None).
    Optimization mirrors the other v* trainers: AdamW with weight decay `l2`,
    WarmUpLR stepped per batch for the first `warmup_epoch` epochs,
    MultiStepLR decay at `milestones` (factor `gamma`) afterwards, gradient
    accumulation every `grad_accum_steps` batches, per-epoch logging to
    'training_log.csv'. Returns the trained net (left on CUDA).

    NOTE(review): local constant name `PAIR_WEIHGT` is a typo for PAIR_WEIGHT;
    `milestones=[5, 10]` is a mutable default argument.
    """
    net = net.cuda()
    if pair_w is None:
        PAIR_WEIHGT = 1.0
    else:
        PAIR_WEIHGT = pair_w
    criterion = wrapper_comb_point_pair_loss(nn.BCEWithLogitsLoss(), pairwise_l1_logit_loss, PAIR_WEIHGT)
    #criterion = wrapper_comb_point_pair_loss(nn.L1Loss(), pairwise_l1_loss, PAIR_WEIHGT)
    #criterion = nn.BCEWithLogitsLoss()
    #criterion = nn.L1Loss()
    #optimizer = optim.Adam(net.parameters(), lr=lr)
    optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=l2)
    #optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0,)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    # Warmup scheduler is stepped once per batch for `warmup_epoch` epochs.
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One pass over trainloader; returns (epoch, mean loss, Spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch LR warmup during warmup phase
            q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
            a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
            outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            loss = criterion(outputs, targets)
            # Scale so accumulated gradients match a single large-batch step.
            loss = loss / grad_accum_steps
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        # Epoch-level Spearman rank correlation of raw outputs vs targets.
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One pass over validloader; returns (epoch, mean loss, Spearman score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(validloader)):
                q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
                a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
                outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # Decay LR only after warmup; `scheduler.step(epoch)` is deprecated.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
    save_log(loglist, 'training_log.csv')
    return net
# swa
def train_model_sepQA_v3_1(net, trainloader, validloader, epochs, lr,
                           swa_start_epoch, swa_freq_step,
                           grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2, l2=0.0,
                           ):
    """Train `net` with Stochastic Weight Averaging (SWA) on top of AdamW.

    Same loop as the other v* trainers (BCEWithLogitsLoss, per-batch WarmUpLR
    for `warmup_epoch` epochs, MultiStepLR decay at `milestones`, gradient
    accumulation every `grad_accum_steps` batches), but the optimizer is a
    manual-mode SWA wrapper: from `swa_start_epoch` onwards the running weight
    average is updated every `swa_freq_step` optimizer steps via update_swa().
    After the last epoch the averaged weights are swapped into `net`
    (swap_swa_sgd), evaluated once more, and the log (final row marked with
    train loss/score = -1) is written to 'training_log.csv'. Returns `net`
    holding the SWA-averaged weights.

    NOTE(review): no BatchNorm statistics update is performed after the swap;
    confirm the model has no BN layers or add `bn_update` if it does.
    NOTE(review): `milestones=[5, 10]` is a mutable default argument.
    """
    net = net.cuda()
    criterion = nn.BCEWithLogitsLoss()
    base_optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=l2)
    # Manual-mode SWA: averaging happens only when update_swa() is called below.
    optimizer = SWA(base_optimizer)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One pass over trainloader; returns (epoch, mean loss, Spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch LR warmup during warmup phase
            q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
            a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
            outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            loss = criterion(outputs, targets)
            # Scale so accumulated gradients match a single large-batch step.
            loss = loss / grad_accum_steps
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            # Fold current weights into the SWA average every `swa_freq_step`
            # optimizer steps once the SWA phase has started.
            if epoch >= swa_start_epoch and ((batch_idx + 1) % (grad_accum_steps * swa_freq_step)) == 0:
                optimizer.update_swa()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        # Epoch-level Spearman rank correlation of raw outputs vs targets.
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One pass over validloader; returns (epoch, mean loss, Spearman score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(validloader)):
                q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
                a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
                outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # Decay LR only after warmup; `scheduler.step(epoch)` is deprecated.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
    # Replace the live weights with the SWA average and evaluate once more;
    # the final log row uses -1 as "no train pass" sentinel values.
    optimizer.swap_swa_sgd()
    ep, ts_ls, ts_sc = test(epochs)
    loglist.append([ep, -1, -1, ts_ls, ts_sc])
    save_log(loglist, 'training_log.csv')
    return net
# classification
def MultiLossWrapper(loss_func, tg_indexs):
    """Wrap a per-column loss over selected (or all) output columns.

    Returns a callable applying ``loss_func`` between matching columns of
    ``outputs`` and ``targets``. When ``tg_indexs`` is None every column
    contributes and the sum is divided by the column count; otherwise only
    the listed indices contribute and the sum is divided by
    ``len(tg_indexs)``.
    """
    def LossFunc(outputs, targets):
        n_cols = outputs.size(1)
        if tg_indexs is None:
            selected = range(n_cols)
            denom = n_cols
        else:
            selected = [col for col in range(n_cols) if col in tg_indexs]
            denom = len(tg_indexs)
        total = 0
        for col in selected:
            total = total + loss_func(outputs[:, col], targets[:, col])
        return total / denom
    return LossFunc
def train_model_sepQA_v4_1(net, trainloader, validloader, epochs, lr, grad_accum_steps=1, warmup_epoch=1, milestones=[5, 10], gamma=0.2, l2=0.0, tg_indexs=None):
    """Train `net` as a per-target classifier (CrossEntropyLoss variant).

    Same optimization scheme as the other v* trainers (AdamW + `l2`,
    per-batch WarmUpLR during `warmup_epoch` epochs, MultiStepLR decay at
    `milestones` with factor `gamma`, gradient accumulation every
    `grad_accum_steps` batches, logging to 'training_log.csv'), but the
    criterion is CrossEntropyLoss per target column via MultiLossWrapper,
    and the Spearman score is computed on `outputs.max(2)[1]` — the argmax
    class label per target. Returns the trained net (left on CUDA).

    NOTE(review): `outputs.max(2)[1]` implies outputs are 3-D, presumably
    (batch, target, class) — confirm against the model definition.
    NOTE(review): `milestones=[5, 10]` is a mutable default argument.
    """
    net = net.cuda()
    criterion = MultiLossWrapper(nn.CrossEntropyLoss(), tg_indexs)
    optimizer = optim.AdamW(net.parameters(), lr=lr, weight_decay=l2)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma) #learning rate decay
    # Warmup scheduler is stepped once per batch for `warmup_epoch` epochs.
    warmup_scheduler = WarmUpLR(optimizer, len(trainloader) * warmup_epoch)

    def train(epoch):
        # One pass over trainloader; returns (epoch, mean loss, Spearman score).
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        train_score = 0
        preds = []
        original = []
        optimizer.zero_grad()
        for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(trainloader)):
            if epoch < warmup_epoch:
                warmup_scheduler.step()  # per-batch LR warmup during warmup phase
            q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
            a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
            outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
            loss = criterion(outputs, targets)
            # Scale so accumulated gradients match a single large-batch step.
            loss = loss / grad_accum_steps
            loss.backward()
            if (batch_idx + 1) % grad_accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
            train_loss += loss.item() * grad_accum_steps  # undo scaling for logging
            #print(loss.item() * grad_accum_steps)
            with torch.no_grad():
                preds.append(outputs.max(2)[1].cpu().numpy()) # label: argmax over the class dimension
                original.append(targets.cpu().numpy())
        # Spearman rank correlation between true and predicted labels.
        train_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Train Loss: %.3f, Score: %.3f' % (train_loss/(batch_idx+1), train_score))
        return epoch, train_loss/(batch_idx+1), train_score

    def test(epoch):
        # One pass over validloader; returns (epoch, mean loss, Spearman score).
        net.eval()
        test_loss = 0
        test_score = 0
        preds = []
        original = []
        with torch.no_grad():
            for batch_idx, (q_ids, q_masks, q_segments, a_ids, a_masks, a_segments, targets) in enumerate(tqdm(validloader)):
                q_ids, q_masks, q_segments, targets = q_ids.cuda(), q_masks.cuda(), q_segments.cuda(), targets.cuda()
                a_ids, a_masks, a_segments = a_ids.cuda(), a_masks.cuda(), a_segments.cuda()
                outputs, _ = net(q_ids, q_masks, q_segments, a_ids, a_masks, a_segments)
                loss = criterion(outputs, targets)
                test_loss += loss.item()
                preds.append(outputs.max(2)[1].cpu().numpy())
                original.append(targets.cpu().numpy())
        test_score = compute_spearmanr(np.concatenate(original), np.concatenate(preds))
        print('Vali Loss: %.3f, Score: %.3f' % (test_loss/(batch_idx+1), test_score))
        return epoch, test_loss/(batch_idx+1), test_score

    loglist = []
    for epoch in range(0, epochs):
        if epoch > warmup_epoch - 1:
            # Decay LR only after warmup; `scheduler.step(epoch)` is deprecated.
            scheduler.step(epoch)
        ep, tr_ls, tr_sc = train(epoch)
        ep, ts_ls, ts_sc = test(epoch)
        loglist.append([ep, tr_ls, tr_sc, ts_ls, ts_sc])
    save_log(loglist, 'training_log.csv')
    return net
| 39.032135
| 162
| 0.586494
| 6,273
| 51,015
| 4.54631
| 0.032202
| 0.02749
| 0.022091
| 0.025527
| 0.929977
| 0.926505
| 0.919422
| 0.913987
| 0.912374
| 0.908903
| 0
| 0.012512
| 0.290307
| 51,015
| 1,306
| 163
| 39.062021
| 0.775197
| 0.053318
| 0
| 0.899033
| 0
| 0
| 0.025702
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06015
| false
| 0
| 0.011815
| 0.001074
| 0.131042
| 0.047261
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2f5dfa79ddfce8dec33fcda30a7f36afd0791530
| 102,059
|
py
|
Python
|
generated/resources/access_control_list_heat.py
|
atsgen/tf-heat-plugin
|
5c0405eb93287368f60f7e227e5af5ada6bfeed2
|
[
"Apache-2.0"
] | 1
|
2020-04-05T19:43:40.000Z
|
2020-04-05T19:43:40.000Z
|
generated/resources/access_control_list_heat.py
|
atsgen/tf-heat-plugin
|
5c0405eb93287368f60f7e227e5af5ada6bfeed2
|
[
"Apache-2.0"
] | null | null | null |
generated/resources/access_control_list_heat.py
|
atsgen/tf-heat-plugin
|
5c0405eb93287368f60f7e227e5af5ada6bfeed2
|
[
"Apache-2.0"
] | 1
|
2020-08-25T12:47:27.000Z
|
2020-08-25T12:47:27.000Z
|
# AUTO-GENERATED file from IFMapApiGenerator. Do Not Edit!
from contrail_heat.resources import contrail
try:
from heat.common.i18n import _
except ImportError:
pass
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
try:
from heat.openstack.common import log as logging
except ImportError:
from oslo_log import log as logging
import uuid
from vnc_api import vnc_api
LOG = logging.getLogger(__name__)
class ContrailAccessControlList(contrail.ContrailResource):
PROPERTIES = (
NAME, FQ_NAME, DISPLAY_NAME, ACCESS_CONTROL_LIST_ENTRIES, ACCESS_CONTROL_LIST_ENTRIES_DYNAMIC, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_PROTOCOL, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX_LEN, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_VIRTUAL_NETWORK, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SECURITY_GROUP, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_NETWORK_POLICY, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_START_PORT, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_END_PORT, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX_LEN, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_VIRTUAL_NETWORK, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SECURITY_GROUP, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_NETWORK_POLICY, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX, 
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_START_PORT, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_END_PORT, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_ETHERTYPE, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_SIMPLE_ACTION, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_GATEWAY_NAME, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_APPLY_SERVICE, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_NAME, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ENCAPSULATION, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_IP_ADDRESS, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ROUTING_INSTANCE, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_UDP_PORT, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ASSIGN_ROUTING_INSTANCE, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_LOG, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ALERT, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_QOS_ACTION, ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_RULE_UUID, VIRTUAL_NETWORK, SECURITY_GROUP
) = (
'name', 'fq_name', 'display_name', 'access_control_list_entries', 'access_control_list_entries_dynamic', 'access_control_list_entries_acl_rule', 'access_control_list_entries_acl_rule_match_condition', 'access_control_list_entries_acl_rule_match_condition_protocol', 'access_control_list_entries_acl_rule_match_condition_src_address', 'access_control_list_entries_acl_rule_match_condition_src_address_subnet', 'access_control_list_entries_acl_rule_match_condition_src_address_subnet_ip_prefix', 'access_control_list_entries_acl_rule_match_condition_src_address_subnet_ip_prefix_len', 'access_control_list_entries_acl_rule_match_condition_src_address_virtual_network', 'access_control_list_entries_acl_rule_match_condition_src_address_security_group', 'access_control_list_entries_acl_rule_match_condition_src_address_network_policy', 'access_control_list_entries_acl_rule_match_condition_src_address_subnet_list', 'access_control_list_entries_acl_rule_match_condition_src_address_subnet_list_ip_prefix', 'access_control_list_entries_acl_rule_match_condition_src_address_subnet_list_ip_prefix_len', 'access_control_list_entries_acl_rule_match_condition_src_port', 'access_control_list_entries_acl_rule_match_condition_src_port_start_port', 'access_control_list_entries_acl_rule_match_condition_src_port_end_port', 'access_control_list_entries_acl_rule_match_condition_dst_address', 'access_control_list_entries_acl_rule_match_condition_dst_address_subnet', 'access_control_list_entries_acl_rule_match_condition_dst_address_subnet_ip_prefix', 'access_control_list_entries_acl_rule_match_condition_dst_address_subnet_ip_prefix_len', 'access_control_list_entries_acl_rule_match_condition_dst_address_virtual_network', 'access_control_list_entries_acl_rule_match_condition_dst_address_security_group', 'access_control_list_entries_acl_rule_match_condition_dst_address_network_policy', 'access_control_list_entries_acl_rule_match_condition_dst_address_subnet_list', 
'access_control_list_entries_acl_rule_match_condition_dst_address_subnet_list_ip_prefix', 'access_control_list_entries_acl_rule_match_condition_dst_address_subnet_list_ip_prefix_len', 'access_control_list_entries_acl_rule_match_condition_dst_port', 'access_control_list_entries_acl_rule_match_condition_dst_port_start_port', 'access_control_list_entries_acl_rule_match_condition_dst_port_end_port', 'access_control_list_entries_acl_rule_match_condition_ethertype', 'access_control_list_entries_acl_rule_action_list', 'access_control_list_entries_acl_rule_action_list_simple_action', 'access_control_list_entries_acl_rule_action_list_gateway_name', 'access_control_list_entries_acl_rule_action_list_apply_service', 'access_control_list_entries_acl_rule_action_list_mirror_to', 'access_control_list_entries_acl_rule_action_list_mirror_to_analyzer_name', 'access_control_list_entries_acl_rule_action_list_mirror_to_encapsulation', 'access_control_list_entries_acl_rule_action_list_mirror_to_analyzer_ip_address', 'access_control_list_entries_acl_rule_action_list_mirror_to_routing_instance', 'access_control_list_entries_acl_rule_action_list_mirror_to_udp_port', 'access_control_list_entries_acl_rule_action_list_assign_routing_instance', 'access_control_list_entries_acl_rule_action_list_log', 'access_control_list_entries_acl_rule_action_list_alert', 'access_control_list_entries_acl_rule_action_list_qos_action', 'access_control_list_entries_acl_rule_rule_uuid', 'virtual_network', 'security_group'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('NAME.'),
update_allowed=True,
required=False,
),
FQ_NAME: properties.Schema(
properties.Schema.STRING,
_('FQ_NAME.'),
update_allowed=True,
required=False,
),
DISPLAY_NAME: properties.Schema(
properties.Schema.STRING,
_('DISPLAY_NAME.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_DYNAMIC: properties.Schema(
properties.Schema.BOOLEAN,
_('ACCESS_CONTROL_LIST_ENTRIES_DYNAMIC.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE: properties.Schema(
properties.Schema.LIST,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE.'),
update_allowed=True,
required=False,
schema=properties.Schema(
properties.Schema.MAP,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_PROTOCOL: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_PROTOCOL.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX_LEN: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX_LEN.'),
update_allowed=True,
required=False,
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_VIRTUAL_NETWORK: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_VIRTUAL_NETWORK.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SECURITY_GROUP: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SECURITY_GROUP.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_NETWORK_POLICY: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_NETWORK_POLICY.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST: properties.Schema(
properties.Schema.LIST,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST.'),
update_allowed=True,
required=False,
schema=properties.Schema(
properties.Schema.MAP,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN.'),
update_allowed=True,
required=False,
),
}
)
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_START_PORT: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_START_PORT.'),
update_allowed=True,
required=False,
constraints=[
constraints.Range(-1, 65535),
],
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_END_PORT: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_END_PORT.'),
update_allowed=True,
required=False,
constraints=[
constraints.Range(-1, 65535),
],
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX_LEN: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX_LEN.'),
update_allowed=True,
required=False,
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_VIRTUAL_NETWORK: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_VIRTUAL_NETWORK.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SECURITY_GROUP: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SECURITY_GROUP.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_NETWORK_POLICY: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_NETWORK_POLICY.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST: properties.Schema(
properties.Schema.LIST,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST.'),
update_allowed=True,
required=False,
schema=properties.Schema(
properties.Schema.MAP,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN.'),
update_allowed=True,
required=False,
),
}
)
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_START_PORT: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_START_PORT.'),
update_allowed=True,
required=False,
constraints=[
constraints.Range(-1, 65535),
],
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_END_PORT: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_END_PORT.'),
update_allowed=True,
required=False,
constraints=[
constraints.Range(-1, 65535),
],
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_ETHERTYPE: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_ETHERTYPE.'),
update_allowed=True,
required=False,
constraints=[
constraints.AllowedValues([u'IPv4', u'IPv6']),
],
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_SIMPLE_ACTION: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_SIMPLE_ACTION.'),
update_allowed=True,
required=False,
constraints=[
constraints.AllowedValues([u'deny', u'pass']),
],
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_GATEWAY_NAME: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_GATEWAY_NAME.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_APPLY_SERVICE: properties.Schema(
properties.Schema.LIST,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_APPLY_SERVICE.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO: properties.Schema(
properties.Schema.MAP,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO.'),
update_allowed=True,
required=False,
schema={
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_NAME: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_NAME.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ENCAPSULATION: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ENCAPSULATION.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_IP_ADDRESS: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_IP_ADDRESS.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ROUTING_INSTANCE: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ROUTING_INSTANCE.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_UDP_PORT: properties.Schema(
properties.Schema.INTEGER,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_UDP_PORT.'),
update_allowed=True,
required=False,
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ASSIGN_ROUTING_INSTANCE: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ASSIGN_ROUTING_INSTANCE.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_LOG: properties.Schema(
properties.Schema.BOOLEAN,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_LOG.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ALERT: properties.Schema(
properties.Schema.BOOLEAN,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ALERT.'),
update_allowed=True,
required=False,
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_QOS_ACTION: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_QOS_ACTION.'),
update_allowed=True,
required=False,
),
}
),
ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_RULE_UUID: properties.Schema(
properties.Schema.STRING,
_('ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_RULE_UUID.'),
update_allowed=True,
required=False,
),
}
)
),
}
),
VIRTUAL_NETWORK: properties.Schema(
properties.Schema.STRING,
_('VIRTUAL_NETWORK.'),
update_allowed=True,
required=False,
),
SECURITY_GROUP: properties.Schema(
properties.Schema.STRING,
_('SECURITY_GROUP.'),
update_allowed=True,
required=False,
),
}
attributes_schema = {
NAME: attributes.Schema(
_('NAME.'),
),
FQ_NAME: attributes.Schema(
_('FQ_NAME.'),
),
DISPLAY_NAME: attributes.Schema(
_('DISPLAY_NAME.'),
),
ACCESS_CONTROL_LIST_ENTRIES: attributes.Schema(
_('ACCESS_CONTROL_LIST_ENTRIES.'),
),
VIRTUAL_NETWORK: attributes.Schema(
_('VIRTUAL_NETWORK.'),
),
SECURITY_GROUP: attributes.Schema(
_('SECURITY_GROUP.'),
),
}
update_allowed_keys = ('Properties',)
def handle_create(self):
parent_obj = None
if parent_obj is None and self.properties.get(self.VIRTUAL_NETWORK):
try:
parent_obj = self.vnc_lib().virtual_network_read(id=self.properties.get(self.VIRTUAL_NETWORK))
except vnc_api.NoIdError:
parent_obj = self.vnc_lib().virtual_network_read(fq_name_str=self.properties.get(self.VIRTUAL_NETWORK))
except:
parent_obj = None
if parent_obj is None and self.properties.get(self.SECURITY_GROUP):
try:
parent_obj = self.vnc_lib().security_group_read(id=self.properties.get(self.SECURITY_GROUP))
except vnc_api.NoIdError:
parent_obj = self.vnc_lib().security_group_read(fq_name_str=self.properties.get(self.SECURITY_GROUP))
except:
parent_obj = None
if parent_obj is None:
raise Exception('Error: parent is not specified in template!')
obj_0 = vnc_api.AccessControlList(name=self.properties[self.NAME],
parent_obj=parent_obj)
if self.properties.get(self.DISPLAY_NAME) is not None:
obj_0.set_display_name(self.properties.get(self.DISPLAY_NAME))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES) is not None:
obj_1 = vnc_api.AclEntriesType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_DYNAMIC) is not None:
obj_1.set_dynamic(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_DYNAMIC))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE) is not None:
for index_1 in range(len(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE))):
obj_2 = vnc_api.AclRuleType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION) is not None:
obj_3 = vnc_api.MatchConditionType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_PROTOCOL) is not None:
obj_3.set_protocol(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_PROTOCOL))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS) is not None:
obj_4 = vnc_api.AddressType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET) is not None:
obj_5 = vnc_api.SubnetType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX) is not None:
obj_5.set_ip_prefix(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX_LEN) is not None:
obj_5.set_ip_prefix_len(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX_LEN))
obj_4.set_subnet(obj_5)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_VIRTUAL_NETWORK) is not None:
obj_4.set_virtual_network(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_VIRTUAL_NETWORK))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SECURITY_GROUP) is not None:
obj_4.set_security_group(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SECURITY_GROUP))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_NETWORK_POLICY) is not None:
obj_4.set_network_policy(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_NETWORK_POLICY))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST) is not None:
for index_4 in range(len(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST))):
obj_5 = vnc_api.SubnetType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST, {})[index_4].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX) is not None:
obj_5.set_ip_prefix(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST, {})[index_4].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST, {})[index_4].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN) is not None:
obj_5.set_ip_prefix_len(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST, {})[index_4].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN))
obj_4.add_subnet_list(obj_5)
obj_3.set_src_address(obj_4)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT) is not None:
obj_4 = vnc_api.PortType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_START_PORT) is not None:
obj_4.set_start_port(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_START_PORT))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_END_PORT) is not None:
obj_4.set_end_port(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_END_PORT))
obj_3.set_src_port(obj_4)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS) is not None:
obj_4 = vnc_api.AddressType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET) is not None:
obj_5 = vnc_api.SubnetType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX) is not None:
obj_5.set_ip_prefix(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX_LEN) is not None:
obj_5.set_ip_prefix_len(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX_LEN))
obj_4.set_subnet(obj_5)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_VIRTUAL_NETWORK) is not None:
obj_4.set_virtual_network(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_VIRTUAL_NETWORK))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SECURITY_GROUP) is not None:
obj_4.set_security_group(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SECURITY_GROUP))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_NETWORK_POLICY) is not None:
obj_4.set_network_policy(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_NETWORK_POLICY))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST) is not None:
for index_4 in range(len(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST))):
obj_5 = vnc_api.SubnetType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST, {})[index_4].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX) is not None:
obj_5.set_ip_prefix(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST, {})[index_4].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST, {})[index_4].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN) is not None:
obj_5.set_ip_prefix_len(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST, {})[index_4].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN))
obj_4.add_subnet_list(obj_5)
obj_3.set_dst_address(obj_4)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT) is not None:
obj_4 = vnc_api.PortType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_START_PORT) is not None:
obj_4.set_start_port(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_START_PORT))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_END_PORT) is not None:
obj_4.set_end_port(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_END_PORT))
obj_3.set_dst_port(obj_4)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_ETHERTYPE) is not None:
obj_3.set_ethertype(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_ETHERTYPE))
obj_2.set_match_condition(obj_3)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST) is not None:
obj_3 = vnc_api.ActionListType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_SIMPLE_ACTION) is not None:
obj_3.set_simple_action(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_SIMPLE_ACTION))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_GATEWAY_NAME) is not None:
obj_3.set_gateway_name(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_GATEWAY_NAME))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_APPLY_SERVICE) is not None:
for index_3 in range(len(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_APPLY_SERVICE))):
obj_3.add_apply_service(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_APPLY_SERVICE)[index_3])
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO) is not None:
obj_4 = vnc_api.MirrorActionType()
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_NAME) is not None:
obj_4.set_analyzer_name(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_NAME))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ENCAPSULATION) is not None:
obj_4.set_encapsulation(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ENCAPSULATION))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_IP_ADDRESS) is not None:
obj_4.set_analyzer_ip_address(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_IP_ADDRESS))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ROUTING_INSTANCE) is not None:
obj_4.set_routing_instance(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ROUTING_INSTANCE))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_UDP_PORT) is not None:
obj_4.set_udp_port(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_UDP_PORT))
obj_3.set_mirror_to(obj_4)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ASSIGN_ROUTING_INSTANCE) is not None:
obj_3.set_assign_routing_instance(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ASSIGN_ROUTING_INSTANCE))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_LOG) is not None:
obj_3.set_log(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_LOG))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ALERT) is not None:
obj_3.set_alert(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ALERT))
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_QOS_ACTION) is not None:
obj_3.set_qos_action(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_QOS_ACTION))
obj_2.set_action_list(obj_3)
if self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_RULE_UUID) is not None:
obj_2.set_rule_uuid(self.properties.get(self.ACCESS_CONTROL_LIST_ENTRIES, {}).get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE, {})[index_1].get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_RULE_UUID))
obj_1.add_acl_rule(obj_2)
obj_0.set_access_control_list_entries(obj_1)
try:
obj_uuid = super(ContrailAccessControlList, self).resource_create(obj_0)
except:
raise Exception(_('access-control-list %s could not be updated.') % self.name)
self.resource_id_set(obj_uuid)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    """Update the access-control-list resource from changed properties.

    Reads the existing object back from the VNC API, overlays only the
    properties present in ``prop_diff`` (display name and/or the ACL
    entries), and writes the object back.

    :param json_snippet: updated resource definition (unused here)
    :param tmpl_diff: template diff (unused here)
    :param prop_diff: dict of changed resource properties
    :raises Exception: if the object cannot be read or updated
    """
    try:
        obj_0 = self.vnc_lib().access_control_list_read(
            id=self.resource_id
        )
    except Exception:
        raise Exception(_('access-control-list %s not found.') % self.name)
    if prop_diff.get(self.DISPLAY_NAME) is not None:
        obj_0.set_display_name(prop_diff.get(self.DISPLAY_NAME))
    entries = prop_diff.get(self.ACCESS_CONTROL_LIST_ENTRIES)
    if entries is not None:
        obj_1 = vnc_api.AclEntriesType()
        if entries.get(self.ACCESS_CONTROL_LIST_ENTRIES_DYNAMIC) is not None:
            obj_1.set_dynamic(
                entries.get(self.ACCESS_CONTROL_LIST_ENTRIES_DYNAMIC))
        acl_rules = entries.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE)
        if acl_rules is not None:
            for rule in acl_rules:
                obj_1.add_acl_rule(self._update_acl_rule(rule))
        obj_0.set_access_control_list_entries(obj_1)
    try:
        self.vnc_lib().access_control_list_update(obj_0)
    except Exception:
        raise Exception(
            _('access-control-list %s could not be updated.') % self.name)

def _update_subnet(self, subnet, ip_prefix_key, ip_prefix_len_key):
    """Build a vnc_api.SubnetType from a subnet property dict.

    Only fields that are present (not None) are set, mirroring the
    field-by-field guard style of the rest of this resource class.
    """
    obj = vnc_api.SubnetType()
    if subnet.get(ip_prefix_key) is not None:
        obj.set_ip_prefix(subnet.get(ip_prefix_key))
    if subnet.get(ip_prefix_len_key) is not None:
        obj.set_ip_prefix_len(subnet.get(ip_prefix_len_key))
    return obj

def _update_address(self, addr, keys):
    """Build a vnc_api.AddressType from an address property dict.

    ``keys`` is the 9-tuple of property-key constants for the address
    direction (src or dst), in order: subnet, subnet ip-prefix, subnet
    ip-prefix-len, virtual-network, security-group, network-policy,
    subnet-list, subnet-list ip-prefix, subnet-list ip-prefix-len.
    The src/dst constant names differ, so they are passed in rather
    than hard-coded.
    """
    (k_subnet, k_sn_prefix, k_sn_prefix_len, k_virtual_network,
     k_security_group, k_network_policy, k_subnet_list,
     k_sl_prefix, k_sl_prefix_len) = keys
    obj = vnc_api.AddressType()
    if addr.get(k_subnet) is not None:
        obj.set_subnet(self._update_subnet(
            addr.get(k_subnet), k_sn_prefix, k_sn_prefix_len))
    if addr.get(k_virtual_network) is not None:
        obj.set_virtual_network(addr.get(k_virtual_network))
    if addr.get(k_security_group) is not None:
        obj.set_security_group(addr.get(k_security_group))
    if addr.get(k_network_policy) is not None:
        obj.set_network_policy(addr.get(k_network_policy))
    if addr.get(k_subnet_list) is not None:
        for subnet in addr.get(k_subnet_list):
            obj.add_subnet_list(self._update_subnet(
                subnet, k_sl_prefix, k_sl_prefix_len))
    return obj

def _update_port(self, port, start_key, end_key):
    """Build a vnc_api.PortType from a port-range property dict."""
    obj = vnc_api.PortType()
    if port.get(start_key) is not None:
        obj.set_start_port(port.get(start_key))
    if port.get(end_key) is not None:
        obj.set_end_port(port.get(end_key))
    return obj

def _update_match_condition(self, cond):
    """Build a vnc_api.MatchConditionType from a match-condition dict."""
    obj = vnc_api.MatchConditionType()
    if cond.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_PROTOCOL) is not None:
        obj.set_protocol(cond.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_PROTOCOL))
    src_address = cond.get(
        self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS)
    if src_address is not None:
        obj.set_src_address(self._update_address(src_address, (
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_IP_PREFIX_LEN,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_VIRTUAL_NETWORK,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SECURITY_GROUP,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_NETWORK_POLICY,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN,
        )))
    src_port = cond.get(
        self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT)
    if src_port is not None:
        obj.set_src_port(self._update_port(
            src_port,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_START_PORT,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_SRC_PORT_END_PORT))
    dst_address = cond.get(
        self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS)
    if dst_address is not None:
        obj.set_dst_address(self._update_address(dst_address, (
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_IP_PREFIX_LEN,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_VIRTUAL_NETWORK,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SECURITY_GROUP,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_NETWORK_POLICY,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_ADDRESS_SUBNET_LIST_IP_PREFIX_LEN,
        )))
    dst_port = cond.get(
        self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT)
    if dst_port is not None:
        obj.set_dst_port(self._update_port(
            dst_port,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_START_PORT,
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_DST_PORT_END_PORT))
    if cond.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_ETHERTYPE) is not None:
        obj.set_ethertype(cond.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION_ETHERTYPE))
    return obj

def _update_action_list(self, actions):
    """Build a vnc_api.ActionListType from an action-list dict."""
    obj = vnc_api.ActionListType()
    if actions.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_SIMPLE_ACTION) is not None:
        obj.set_simple_action(actions.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_SIMPLE_ACTION))
    if actions.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_GATEWAY_NAME) is not None:
        obj.set_gateway_name(actions.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_GATEWAY_NAME))
    apply_service = actions.get(
        self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_APPLY_SERVICE)
    if apply_service is not None:
        for service in apply_service:
            obj.add_apply_service(service)
    mirror_to = actions.get(
        self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO)
    if mirror_to is not None:
        mirror = vnc_api.MirrorActionType()
        if mirror_to.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_NAME) is not None:
            mirror.set_analyzer_name(mirror_to.get(
                self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_NAME))
        if mirror_to.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ENCAPSULATION) is not None:
            mirror.set_encapsulation(mirror_to.get(
                self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ENCAPSULATION))
        if mirror_to.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_IP_ADDRESS) is not None:
            mirror.set_analyzer_ip_address(mirror_to.get(
                self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ANALYZER_IP_ADDRESS))
        if mirror_to.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ROUTING_INSTANCE) is not None:
            mirror.set_routing_instance(mirror_to.get(
                self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_ROUTING_INSTANCE))
        if mirror_to.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_UDP_PORT) is not None:
            mirror.set_udp_port(mirror_to.get(
                self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_MIRROR_TO_UDP_PORT))
        obj.set_mirror_to(mirror)
    if actions.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ASSIGN_ROUTING_INSTANCE) is not None:
        obj.set_assign_routing_instance(actions.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ASSIGN_ROUTING_INSTANCE))
    if actions.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_LOG) is not None:
        obj.set_log(actions.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_LOG))
    if actions.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ALERT) is not None:
        obj.set_alert(actions.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_ALERT))
    if actions.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_QOS_ACTION) is not None:
        obj.set_qos_action(actions.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST_QOS_ACTION))
    return obj

def _update_acl_rule(self, rule):
    """Build a vnc_api.AclRuleType from a single acl-rule property dict."""
    obj = vnc_api.AclRuleType()
    match_condition = rule.get(
        self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_MATCH_CONDITION)
    if match_condition is not None:
        obj.set_match_condition(self._update_match_condition(match_condition))
    action_list = rule.get(
        self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_ACTION_LIST)
    if action_list is not None:
        obj.set_action_list(self._update_action_list(action_list))
    if rule.get(self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_RULE_UUID) is not None:
        obj.set_rule_uuid(rule.get(
            self.ACCESS_CONTROL_LIST_ENTRIES_ACL_RULE_RULE_UUID))
    return obj
def handle_delete(self):
    """Delete the backing access-control-list object, if one exists.

    A missing resource id means nothing was ever created, so there is
    nothing to delete. Not-found errors from the API are tolerated and
    logged, since the object may already have been removed.
    """
    if self.resource_id is None:
        return
    client = self.vnc_lib()
    try:
        client.access_control_list_delete(id=self.resource_id)
    except Exception as exc:
        self._ignore_not_found(exc)
        LOG.warn(_('access_control_list %s already deleted.') % self.name)
def _show_resource(self):
    """Read the live object from the VNC API and return it as a
    JSON-serializable dict."""
    return self.vnc_lib().access_control_list_read(
        id=self.resource_id).serialize_to_json()
def resource_mapping():
    """Map the Heat resource type name to its plug-in class."""
    mapping = {'OS::ContrailV2::AccessControlList': ContrailAccessControlList}
    return mapping
| 143.341292
| 3,465
| 0.692521
| 12,633
| 102,059
| 4.952426
| 0.012824
| 0.204878
| 0.267918
| 0.375168
| 0.979797
| 0.97823
| 0.972732
| 0.966642
| 0.963046
| 0.956237
| 0
| 0.004689
| 0.233149
| 102,059
| 711
| 3,466
| 143.542897
| 0.794708
| 0.000549
| 0
| 0.487627
| 1
| 0
| 0.067657
| 0.063784
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007278
| false
| 0.002911
| 0.016012
| 0.001456
| 0.034935
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
2f60c8f4c4bfade8eb66d023fb05b906a8a260ad
| 147
|
py
|
Python
|
lib/solutions/hello.py
|
DPNT-Sourcecode/CHK-tzcu01
|
9c420c4839bc25b9341500799d057fdc66c118bc
|
[
"Apache-2.0"
] | null | null | null |
lib/solutions/hello.py
|
DPNT-Sourcecode/CHK-tzcu01
|
9c420c4839bc25b9341500799d057fdc66c118bc
|
[
"Apache-2.0"
] | null | null | null |
lib/solutions/hello.py
|
DPNT-Sourcecode/CHK-tzcu01
|
9c420c4839bc25b9341500799d057fdc66c118bc
|
[
"Apache-2.0"
] | null | null | null |
# python2 ((((((((
# noinspection PyUnusedLocal
def hello(friend_name):
    """Return a greeting for *friend_name* (a unicode string)."""
    return u"Hello, {0}!".format(friend_name)
| 21
| 42
| 0.673469
| 17
| 147
| 5.647059
| 0.647059
| 0.3125
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0.170068
| 147
| 6
| 43
| 24.5
| 0.778689
| 0.489796
| 0
| 0
| 0
| 0
| 0.112676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
85ee97eb1ac754edf7ec7c3a264ff3acfcce26fd
| 502
|
py
|
Python
|
unittest_reinvent/scoring_tests/physchem/__init__.py
|
fujirock/Reinvent
|
9c57636f9d32b4ce5b75670f43906a70d5daf886
|
[
"MIT"
] | 1
|
2021-08-31T02:28:10.000Z
|
2021-08-31T02:28:10.000Z
|
unittest_reinvent/scoring_tests/physchem/__init__.py
|
prasannavd/Reinvent
|
ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616
|
[
"MIT"
] | null | null | null |
unittest_reinvent/scoring_tests/physchem/__init__.py
|
prasannavd/Reinvent
|
ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616
|
[
"MIT"
] | null | null | null |
from unittest_reinvent.scoring_tests.physchem.test_mw_score import *
from unittest_reinvent.scoring_tests.physchem.test_tpsa_score import *
from unittest_reinvent.scoring_tests.physchem.test_num_rot_bonds import *
from unittest_reinvent.scoring_tests.physchem.test_hbd_lipinski import *
from unittest_reinvent.scoring_tests.physchem.test_hba_lipinski import *
from unittest_reinvent.scoring_tests.physchem.test_num_rings import *
from unittest_reinvent.scoring_tests.physchem.test_slogp_score import *
| 62.75
| 73
| 0.888446
| 71
| 502
| 5.873239
| 0.267606
| 0.201439
| 0.335731
| 0.453237
| 0.901679
| 0.901679
| 0.901679
| 0.796163
| 0.556355
| 0
| 0
| 0
| 0.055777
| 502
| 7
| 74
| 71.714286
| 0.879747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 11
|
c07b2bf64256802cae0493d4826d778139caadf2
| 679
|
py
|
Python
|
IRIS/iris_simple_test.py
|
petercunning/notebook
|
5b26f2dc96bcb36434542b397de6ca5fa3b61a0a
|
[
"MIT"
] | 32
|
2015-01-07T01:48:05.000Z
|
2022-03-02T07:07:42.000Z
|
IRIS/iris_simple_test.py
|
petercunning/notebook
|
5b26f2dc96bcb36434542b397de6ca5fa3b61a0a
|
[
"MIT"
] | 1
|
2015-04-13T21:00:18.000Z
|
2015-04-13T21:00:18.000Z
|
IRIS/iris_simple_test.py
|
petercunning/notebook
|
5b26f2dc96bcb36434542b397de6ca5fa3b61a0a
|
[
"MIT"
] | 30
|
2015-01-28T09:31:29.000Z
|
2022-03-07T03:08:28.000Z
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import iris
url='http://oceanmodeling.pmc.ucsc.edu:8080/thredds/dodsC/ccsnrt/fmrc/CCSNRT_Aggregation_best.ncd'
var='potential temperature'
cube = iris.load_cube(url,var)
# <codecell>
import iris
url='http://oceanmodeling.pmc.ucsc.edu:8080/thredds/dodsC/ccsnrt/fmrc/CCSNRT_Aggregation_best.ncd'
var='potential temperature'
cube = iris.load_cube(url,var)
# <codecell>
import iris
url='http://omgsrv1.meas.ncsu.edu:8080/thredds/dodsC/fmrc/us_east/US_East_Forecast_Model_Run_Collection_best.ncd'
var='sea_water_potential_temperature'
cube = iris.load_cube(url,var)
# <codecell>
iris.__version__
# <codecell>
| 21.21875
| 113
| 0.77025
| 99
| 679
| 5.070707
| 0.40404
| 0.083665
| 0.10757
| 0.125498
| 0.719124
| 0.719124
| 0.719124
| 0.719124
| 0.719124
| 0.619522
| 0
| 0.025518
| 0.076583
| 679
| 31
| 114
| 21.903226
| 0.77512
| 0.148748
| 0
| 0.769231
| 0
| 0.230769
| 0.640845
| 0.054577
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
23b91fea6a64d203a4fd2e242be42a73b274dfae
| 7,380
|
py
|
Python
|
code/graphics/experimental_evaluation.py
|
ShivaP69/Exploiting-Personalized-Calibration-and-Metrics-for-Fairness-Recommendation
|
e46b690453ab8b424f65c1142b8a86f6be7adcb3
|
[
"MIT"
] | 1
|
2021-11-06T11:35:15.000Z
|
2021-11-06T11:35:15.000Z
|
code/graphics/experimental_evaluation.py
|
ShivaP69/Exploiting-Personalized-Calibration-and-Metrics-for-Fairness-Recommendation
|
e46b690453ab8b424f65c1142b8a86f6be7adcb3
|
[
"MIT"
] | null | null | null |
code/graphics/experimental_evaluation.py
|
ShivaP69/Exploiting-Personalized-Calibration-and-Metrics-for-Fairness-Recommendation
|
e46b690453ab8b424f65c1142b8a86f6be7adcb3
|
[
"MIT"
] | 1
|
2021-09-22T11:18:26.000Z
|
2021-09-22T11:18:26.000Z
|
import os
from copy import deepcopy
from settings.config import EVALUATION_METRIC_LABEL, FAIRNESS_METRIC_LABEL, algorithm_label, FONT_SIZE_VALUE, \
LAMBDA_VALUE_LABEL, EVALUATION_VALUE_LABEL, DPI_VALUE, QUALITY_VALUE, markers_list, line_style_list, \
postprocessing_results_path, MAP_LABEL, MC_LABEL, MACE_LABEL
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
# matplotlib.style.use('ggplot')
def evaluation_linear_fairness_by_algo_over_lambda(evaluation_results_df, k):
save_dir = postprocessing_results_path + '/' + str(k) + '/'
for metric in evaluation_results_df[EVALUATION_METRIC_LABEL].unique().tolist():
evaluation_subset_df = evaluation_results_df[evaluation_results_df[EVALUATION_METRIC_LABEL] == metric]
for recommender in evaluation_subset_df[algorithm_label].unique().tolist():
recommender_subset_df = evaluation_subset_df[evaluation_subset_df[algorithm_label] == recommender]
plt.figure()
plt.grid(True)
plt.xlabel('Weight', fontsize=FONT_SIZE_VALUE)
lambda_values = [str(x) for x in recommender_subset_df[LAMBDA_VALUE_LABEL].unique().tolist()]
plt.xticks(range(0, len(lambda_values)), lambda_values)
if metric == 'MC':
metric = 'MRMC'
plt.ylabel(metric, fontsize=FONT_SIZE_VALUE)
fairness_measures = recommender_subset_df[FAIRNESS_METRIC_LABEL].unique().tolist()
n = len(fairness_measures)
for distance_metric, m, l in zip(fairness_measures, markers_list[:n], line_style_list[:n]):
distance_subset_df = recommender_subset_df[
recommender_subset_df[FAIRNESS_METRIC_LABEL] == distance_metric]
plt.plot([str(x) for x in distance_subset_df[LAMBDA_VALUE_LABEL].tolist()],
distance_subset_df[EVALUATION_VALUE_LABEL].tolist(), alpha=0.5, linestyle=l, marker=m,
label=distance_metric)
plt.legend(loc='best', borderaxespad=0.)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(
save_dir
+ metric
+ '_'
+ recommender
+ '.png',
format='png',
dpi=DPI_VALUE,
quality=QUALITY_VALUE,
bbox_inches='tight'
)
plt.close('all')
def evaluation_map_by_mc(evaluation_results_df, k):
save_dir = postprocessing_results_path + '/' + str(k) + '/'
for distance_metric in evaluation_results_df[FAIRNESS_METRIC_LABEL].unique().tolist():
map_subset_df = evaluation_results_df[
(evaluation_results_df[FAIRNESS_METRIC_LABEL] == distance_metric) & (evaluation_results_df[
EVALUATION_METRIC_LABEL] == MAP_LABEL)]
mc_subset_df = evaluation_results_df[
(evaluation_results_df[FAIRNESS_METRIC_LABEL] == distance_metric) & (
evaluation_results_df[EVALUATION_METRIC_LABEL] == MC_LABEL)]
plt.figure()
plt.grid(True)
plt.xlabel(MAP_LABEL, fontsize=FONT_SIZE_VALUE)
plt.ylabel('MRMC', fontsize=FONT_SIZE_VALUE)
algorithm_list = evaluation_results_df[algorithm_label].unique().tolist()
n = len(algorithm_list)
for algorithm, m, l in zip(algorithm_list, markers_list[:n], line_style_list[:n]):
algorithm_map_subset_df = deepcopy(map_subset_df[
map_subset_df[algorithm_label] == algorithm])
algorihm_mc_subset_df = deepcopy(mc_subset_df[
mc_subset_df[algorithm_label] == algorithm])
algorithm_map_subset_df[LAMBDA_VALUE_LABEL] = algorithm_map_subset_df[LAMBDA_VALUE_LABEL].astype('category')
algorithm_map_subset_df.sort_values(by=[LAMBDA_VALUE_LABEL], inplace=True)
algorihm_mc_subset_df[LAMBDA_VALUE_LABEL] = algorihm_mc_subset_df[LAMBDA_VALUE_LABEL].astype('category')
algorihm_mc_subset_df.sort_values(by=[LAMBDA_VALUE_LABEL], inplace=True)
plt.plot(algorithm_map_subset_df[EVALUATION_VALUE_LABEL].tolist(),
algorihm_mc_subset_df[EVALUATION_VALUE_LABEL].tolist(), alpha=0.5, linestyle=l, marker=m,
label=algorithm)
plt.legend(loc='best', borderaxespad=0.)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(
save_dir
+ MAP_LABEL
+ '_'
+ MC_LABEL
+ '_'
+ distance_metric
+ '.png',
format='png',
dpi=DPI_VALUE,
quality=QUALITY_VALUE,
bbox_inches='tight'
)
plt.close('all')
def evaluation_map_by_mace(evaluation_results_df, k):
save_dir = postprocessing_results_path + '/' + str(k) + '/'
for distance_metric in evaluation_results_df[FAIRNESS_METRIC_LABEL].unique().tolist():
map_subset_df = evaluation_results_df[
(evaluation_results_df[FAIRNESS_METRIC_LABEL] == distance_metric) & (evaluation_results_df[
EVALUATION_METRIC_LABEL] == MAP_LABEL)]
mc_subset_df = evaluation_results_df[
(evaluation_results_df[FAIRNESS_METRIC_LABEL] == distance_metric) & (
evaluation_results_df[EVALUATION_METRIC_LABEL] == MACE_LABEL)]
plt.figure()
plt.grid(True)
plt.xlabel(MAP_LABEL, fontsize=FONT_SIZE_VALUE)
plt.ylabel(MACE_LABEL, fontsize=FONT_SIZE_VALUE)
algorithm_list = evaluation_results_df[algorithm_label].unique().tolist()
n = len(algorithm_list)
for algorithm, m, l in zip(algorithm_list, markers_list[:n], line_style_list[:n]):
algorithm_map_subset_df = deepcopy(map_subset_df[
map_subset_df[algorithm_label] == algorithm])
algorihm_mc_subset_df = deepcopy(mc_subset_df[
mc_subset_df[algorithm_label] == algorithm])
algorithm_map_subset_df[LAMBDA_VALUE_LABEL] = algorithm_map_subset_df[LAMBDA_VALUE_LABEL].astype('category')
algorithm_map_subset_df.sort_values(by=[LAMBDA_VALUE_LABEL], inplace=True)
algorihm_mc_subset_df[LAMBDA_VALUE_LABEL] = algorihm_mc_subset_df[LAMBDA_VALUE_LABEL].astype('category')
algorihm_mc_subset_df.sort_values(by=[LAMBDA_VALUE_LABEL], inplace=True)
plt.plot(algorithm_map_subset_df[EVALUATION_VALUE_LABEL].tolist(),
algorihm_mc_subset_df[EVALUATION_VALUE_LABEL].tolist(), alpha=0.5, linestyle=l, marker=m,
label=algorithm)
plt.legend(loc='best', borderaxespad=0.)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(
save_dir
+ MAP_LABEL
+ '_'
+ MACE_LABEL
+ '_'
+ distance_metric
+ '.png',
format='png',
dpi=DPI_VALUE,
quality=QUALITY_VALUE,
bbox_inches='tight'
)
plt.close('all')
| 51.608392
| 124
| 0.621409
| 830
| 7,380
| 5.103614
| 0.116867
| 0.083097
| 0.098678
| 0.075307
| 0.822474
| 0.771955
| 0.733475
| 0.720491
| 0.709632
| 0.709632
| 0
| 0.001898
| 0.286043
| 7,380
| 142
| 125
| 51.971831
| 0.80205
| 0.007046
| 0
| 0.709924
| 0
| 0
| 0.015836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022901
| false
| 0
| 0.030534
| 0
| 0.053435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f1c660713b34ab107897d8c41093b7b8d98fcf6a
| 23,100
|
py
|
Python
|
rapid7vmconsole/api/scan_api.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 61
|
2018-05-17T05:57:09.000Z
|
2022-03-08T13:59:21.000Z
|
rapid7vmconsole/api/scan_api.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 33
|
2018-06-26T16:21:14.000Z
|
2022-03-03T20:55:47.000Z
|
rapid7vmconsole/api/scan_api.py
|
kiblik/vm-console-client-python
|
038f6d33e8b2654a558326c6eb87f09ee23e0e22
|
[
"MIT"
] | 43
|
2018-02-24T05:45:53.000Z
|
2022-03-31T22:15:16.000Z
|
# coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from rapid7vmconsole.api_client import ApiClient
class ScanApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_scan(self, id, **kwargs): # noqa: E501
"""Scan # noqa: E501
Returns the specified scan. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_scan(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the scan. (required)
:return: Scan
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_scan_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_scan_with_http_info(id, **kwargs) # noqa: E501
return data
def get_scan_with_http_info(self, id, **kwargs): # noqa: E501
"""Scan # noqa: E501
Returns the specified scan. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_scan_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the scan. (required)
:return: Scan
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_scan" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_scan`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/scans/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Scan', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_scans(self, **kwargs): # noqa: E501
"""Scans # noqa: E501
Returns all scans. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_scans(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool active: Return running scans or past scans (true/false value).
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfGlobalScan
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_scans_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_scans_with_http_info(**kwargs) # noqa: E501
return data
def get_scans_with_http_info(self, **kwargs): # noqa: E501
"""Scans # noqa: E501
Returns all scans. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_scans_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool active: Return running scans or past scans (true/false value).
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfGlobalScan
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['active', 'page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_scans" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'active' in params:
query_params.append(('active', params['active'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/scans', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfGlobalScan', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_site_scans(self, id, **kwargs): # noqa: E501
"""Site Scans # noqa: E501
Returns the scans for the specified site. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_site_scans(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the site. (required)
:param bool active: Return running scans or past scans (true/false value).
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfScan
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_site_scans_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_site_scans_with_http_info(id, **kwargs) # noqa: E501
return data
def get_site_scans_with_http_info(self, id, **kwargs): # noqa: E501
"""Site Scans # noqa: E501
Returns the scans for the specified site. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_site_scans_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the site. (required)
:param bool active: Return running scans or past scans (true/false value).
:param int page: The index of the page (zero-based) to retrieve.
:param int size: The number of records per page to retrieve.
:param list[str] sort: The criteria to sort the records by, in the format: `property[,ASC|DESC]`. The default sort order is ascending. Multiple sort criteria can be specified using multiple sort query parameters.
:return: PageOfScan
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'active', 'page', 'size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_site_scans" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_site_scans`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'active' in params:
query_params.append(('active', params['active'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
collection_formats['sort'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/sites/{id}/scans', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageOfScan', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def set_scan_status(self, id, status, **kwargs): # noqa: E501
"""Scan Status # noqa: E501
Updates the scan status. Can pause, resume, and stop scans using this resource. In order to stop a scan the scan must be running or paused. In order to resume a scan the scan must be paused. In order to pause a scan the scan must be running. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_scan_status(id, status, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the scan. (required)
:param str status: The status of the scan. (required)
:return: Links
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_scan_status_with_http_info(id, status, **kwargs) # noqa: E501
else:
(data) = self.set_scan_status_with_http_info(id, status, **kwargs) # noqa: E501
return data
def set_scan_status_with_http_info(self, id, status, **kwargs): # noqa: E501
"""Scan Status # noqa: E501
Updates the scan status. Can pause, resume, and stop scans using this resource. In order to stop a scan the scan must be running or paused. In order to resume a scan the scan must be paused. In order to pause a scan the scan must be running. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_scan_status_with_http_info(id, status, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the scan. (required)
:param str status: The status of the scan. (required)
:return: Links
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'status'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method set_scan_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `set_scan_status`") # noqa: E501
# verify the required parameter 'status' is set
if ('status' not in params or
params['status'] is None):
raise ValueError("Missing the required parameter `status` when calling `set_scan_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
if 'status' in params:
path_params['status'] = params['status'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/scans/{id}/{status}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Links', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def start_scan(self, id, **kwargs): # noqa: E501
"""Site Scans # noqa: E501
Starts a scan for the specified site. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.start_scan(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the site. (required)
:param bool override_blackout: Whether to request for the override of an scan blackout window.
:param AdhocScan scan: The details for the scan.
:return: CreatedReferenceScanIDLink
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.start_scan_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.start_scan_with_http_info(id, **kwargs) # noqa: E501
return data
def start_scan_with_http_info(self, id, **kwargs): # noqa: E501
"""Site Scans # noqa: E501
Starts a scan for the specified site. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.start_scan_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the site. (required)
:param bool override_blackout: Whether to request for the override of an scan blackout window.
:param AdhocScan scan: The details for the scan.
:return: CreatedReferenceScanIDLink
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'override_blackout', 'scan'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method start_scan" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `start_scan`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'override_blackout' in params:
query_params.append(('overrideBlackout', params['override_blackout'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'scan' in params:
body_params = params['scan']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/sites/{id}/scans', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreatedReferenceScanIDLink', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 40.526316
| 263
| 0.604719
| 2,798
| 23,100
| 4.799857
| 0.071837
| 0.054207
| 0.020849
| 0.026806
| 0.940804
| 0.934996
| 0.930305
| 0.919509
| 0.912063
| 0.905436
| 0
| 0.018072
| 0.300519
| 23,100
| 569
| 264
| 40.59754
| 0.813096
| 0.37632
| 0
| 0.777409
| 1
| 0
| 0.174309
| 0.043881
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036545
| false
| 0
| 0.013289
| 0
| 0.10299
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f1d90e63985e3e66fbc1735c59bf01ea3bbce690
| 17,693
|
py
|
Python
|
PAD1/my_dataset.py
|
yueyechen/cvpr20
|
69e6f9fd2393048bf48d7542a19ff0087b97d033
|
[
"Apache-2.0"
] | 1
|
2020-12-18T06:02:29.000Z
|
2020-12-18T06:02:29.000Z
|
PAD1/my_dataset.py
|
yueyechen/cvpr20
|
69e6f9fd2393048bf48d7542a19ff0087b97d033
|
[
"Apache-2.0"
] | 4
|
2020-06-13T03:25:05.000Z
|
2022-01-13T02:17:29.000Z
|
PAD1/my_dataset.py
|
yueyechen/cvpr20
|
69e6f9fd2393048bf48d7542a19ff0087b97d033
|
[
"Apache-2.0"
] | 3
|
2020-03-02T10:10:48.000Z
|
2020-05-11T08:27:40.000Z
|
from torch.utils.data import Dataset
from PIL import Image, ImageOps
import os
import csv
import random
import numpy as np
# from torchvision import transforms as trans
def default_loader(path):
return Image.open(path).convert('RGB')
def default_loader_half(path):
img = Image.open(path).convert('RGB')
return img.crop((0,0,img.size[0], img.size[1]/2)) #
def TTA_5_cropps(img, target_size):
width, height = img.size
target_w, target_h = target_size
start_x = (width - target_w) // 2
start_y = (height - target_h) // 2
starts = [
[start_x, start_y],
[start_x - target_w, start_y],
[start_x, start_y - target_w],
[start_x + target_w, start_y],
[start_x, start_y + target_w]
]
crops = []
for start_index in starts:
x, y = start_index
x = min(max(0, x), width - target_w - 1)
y = min(max(0, y), height - target_h - 1)
patch = img.crop((x, y, x+target_w, y + target_h))
crops.append(patch)
return crops
def TTA_9_cropps(img, target_size):
width, height = img.size
target_w, target_h = target_size
start_x = (width - target_w) // 2
start_y = (height - target_h) // 2
starts = [[start_x, start_y],
[start_x - target_w, start_y],
[start_x, start_y - target_h],
[start_x + target_w, start_y],
[start_x, start_y + target_h],
[start_x + target_w, start_y + target_h],
[start_x - target_w, start_y - target_h],
[start_x - target_w, start_y + target_h],
[start_x + target_w, start_y - target_h],
]
crops = []
for start_index in starts:
x, y = start_index
x = min(max(0, x), width - target_w - 1)
y = min(max(0, y), height - target_h - 1)
patch = img.crop((x, y, x + target_w, y + target_h))
crops.append(patch)
return crops
def TTA_18_cropps(img, target_size):
width, height = img.size
target_w, target_h = target_size
start_x = (width - target_w) // 2
start_y = (height - target_h) // 2
starts = [[start_x, start_y],
[start_x - target_w, start_y],
[start_x, start_y - target_h],
[start_x + target_w, start_y],
[start_x, start_y + target_h],
[start_x + target_w, start_y + target_h],
[start_x - target_w, start_y - target_h],
[start_x - target_w, start_y + target_h],
[start_x + target_w, start_y - target_h],
]
crops = []
for start_index in starts:
x, y = start_index
x = min(max(0, x), width - target_w - 1)
y = min(max(0, y), height - target_h - 1)
patch = img.crop((x, y, x + target_w, y + target_h))
crops.append(patch)
crops.append(patch.transpose(Image.FLIP_LEFT_RIGHT))
return crops
def TTA_36_cropps(img, target_size):
    """Return 36 TTA crops: the 9-crop grid with all 4 flip variants each.

    Each of the 9 positions yields, in order: the crop itself, its vertical
    flip, its horizontal flip, and the horizontal flip flipped vertically
    (i.e. a 180-degree rotation).
    """
    w, h = img.size
    tw, th = target_size
    cx = (w - tw) // 2
    cy = (h - th) // 2
    offsets = [(0, 0), (-tw, 0), (0, -th), (tw, 0), (0, th),
               (tw, th), (-tw, -th), (-tw, th), (tw, -th)]
    patches = []
    for dx, dy in offsets:
        px = min(max(0, cx + dx), w - tw - 1)
        py = min(max(0, cy + dy), h - th - 1)
        patch = img.crop((px, py, px + tw, py + th))
        mirrored = patch.transpose(Image.FLIP_LEFT_RIGHT)
        patches.extend([
            patch,
            patch.transpose(Image.FLIP_TOP_BOTTOM),
            mirrored,
            mirrored.transpose(Image.FLIP_TOP_BOTTOM),
        ])
    return patches
class MyDataset_huoti_val_patch(Dataset):
    """Validation dataset that yields multiple TTA crops per sample.

    Each line of ``conf.val_list`` is expected to hold
    ``rgb_path depth_path nir_path label``; the image column used is
    selected by ``conf.eval.format`` and the label is always column 3.

    ``__getitem__`` returns ``([crops], label, [filename])`` where ``crops``
    is the list of 5/9/18/36 patches produced by the TTA_*_cropps helpers.
    """

    # Image-path column in the list file for each supported modality.
    _FORMAT_COLUMN = {'rgb': 0, 'depth': 1, 'nir': 2}

    def __init__(self, conf, target_transform=None, loader=default_loader):
        if conf.eval.format not in self._FORMAT_COLUMN:
            # Consistency fix: the *_rectified datasets raise on an unknown
            # format; this class used to silently produce an empty dataset.
            raise ValueError
        col = self._FORMAT_COLUMN[conf.eval.format]
        imgs = []
        # Close the list file deterministically (it used to be leaked).
        with open(conf.val_list, 'r') as fh:
            for line in fh:
                words = line.strip().split()
                imgs.append((words[col], int(words[3])))
        self.imgs = imgs
        self.transform = conf.eval.transform
        self.target_transform = target_transform
        # Half-face models use a loader that crops the face differently.
        self.loader = default_loader_half if conf.model.half_face else loader
        self.root = conf.huoti_folder
        self.input_size = conf.eval.input_size
        self.random_offset = conf.eval.random_offset
        self.patch_size = conf.patch_size
        self.patch_num = conf.patch_num

    def __getitem__(self, index):
        fn1, label = self.imgs[index]
        img1 = self.loader(os.path.join(str(self.root), fn1))
        # Over-resize so the shifted TTA crops stay inside the image.
        img1 = img1.resize((self.input_size[0] + self.random_offset[0],
                            self.input_size[1] + self.random_offset[1]))
        tta = {5: TTA_5_cropps, 9: TTA_9_cropps,
               18: TTA_18_cropps, 36: TTA_36_cropps}
        if self.patch_num not in tta:
            # BUGFIX: an unsupported patch_num used to fall through the
            # if/elif chain and crash with UnboundLocalError on `imgs`.
            raise ValueError('unsupported patch_num: %r' % (self.patch_num,))
        imgs = tta[self.patch_num](img1, self.patch_size)
        if self.transform is not None:
            imgs = [self.transform(t) for t in imgs]
        if self.target_transform is not None:
            label = self.target_transform(label)
        return [imgs], label, [fn1]

    def __len__(self):
        return len(self.imgs)
class MyDataset_huoti_train_rectified(Dataset):
    """Training dataset that squares the face rect, crops, and augments.

    Each line of ``conf.train_list`` carries image path(s), a 4-value face
    rect, and a trailing float label; the columns read depend on
    ``conf.train.format``.  Samples whose rect contains -1 (no detection)
    are skipped.  ``__getitem__`` returns ``([img], label, [filename])``.
    """

    # (path column, rect slice) per modality in the train list file.
    _FORMAT_COLUMNS = {
        'rgb': (0, slice(1, 5)),
        'depth': (5, slice(6, 10)),
        'nir': (6, slice(7, 11)),
    }

    def __init__(self, conf, target_transform=None, loader=default_loader):
        if conf.train.format not in self._FORMAT_COLUMNS:
            raise ValueError
        name_col, rect_slice = self._FORMAT_COLUMNS[conf.train.format]
        imgs = []
        self.rects = []   # face rects, index-aligned with self.imgs
        self.counter = 0  # number of kept (detected) samples
        # Close the list file deterministically (it used to be leaked).
        with open(conf.train_list, 'r') as fh:
            for line in fh:
                data = line.strip().split()
                rect = [int(float(x)) for x in data[rect_slice]]
                # Skip entries with an invalid (-1) detection rect.
                if (np.array(rect) == -1).any():
                    continue
                self.counter += 1
                imgs.append((data[name_col], float(data[-1])))
                self.rects.append(rect)
        self.imgs = imgs
        self.transform = conf.train.transform
        self.target_transform = target_transform
        self.loader = loader
        self.root = conf.huoti_folder
        self.input_size = conf.model.input_size
        self.random_offset = conf.model.random_offset
        self.expand_ratio = 1.2

    def __getitem__(self, index):
        fn1, label = self.imgs[index]
        img1 = self.loader(os.path.join(str(self.root), fn1))
        rect = self.rects[index]
        rect_w = rect[2] - rect[0]
        rect_h = rect[3] - rect[1]
        w, h = img1.size
        if rect_w < rect_h:
            # Grow the rect horizontally into a square of side rect_h,
            # keeping its horizontal center; pad with black where the new
            # rect falls outside the image.
            origin = rect[0] + rect[2]
            rect[0] = int(origin / 2 - rect_h / 2)
            rect[2] = int(origin / 2 + rect_h / 2)
            border_l = abs(rect[0]) if rect[0] < 0 else 0
            border_r = (rect[2] - w) if rect[2] > w else 0
            img1 = ImageOps.expand(img1, (border_l, 0, border_r, 0), 0)
            rect[0] = max(0, rect[0])
            rect[2] = rect[0] + rect_h
        else:
            # Grow the rect vertically into a square of side rect_w.
            origin = rect[1] + rect[3]
            rect[1] = int(origin / 2 - rect_w / 2)
            rect[3] = int(origin / 2 + rect_w / 2)
            border_t = abs(rect[1]) if rect[1] < 0 else 0
            border_b = (rect[3] - h) if rect[3] > h else 0
            img1 = ImageOps.expand(img1, (0, border_t, 0, border_b), 0)
            rect[1] = max(0, rect[1])
            # BUGFIX: was `rect[0] + rect_w`, anchoring the bottom edge to
            # the LEFT edge; the val counterpart uses rect[1] + rect_w.
            rect[3] = rect[1] + rect_w
        img1 = img1.crop((rect[0], rect[1], rect[2], rect[3]))
        img1 = img1.resize((self.input_size[0] + self.random_offset[0],
                            self.input_size[1] + self.random_offset[1]))
        # Random translation augmentation within the over-sized resize.
        offset_x = random.randint(0, self.random_offset[0])
        offset_y = random.randint(0, self.random_offset[1])
        img1 = img1.crop((offset_x, offset_y,
                          offset_x + self.input_size[0],
                          offset_y + self.input_size[1]))
        # Random horizontal flip.
        if random.random() > 0.5:
            img1 = img1.transpose(Image.FLIP_LEFT_RIGHT)
        # Random rotation in [-15, 15] degrees (80% of the time).
        if random.random() > 0.2:
            degree = random.randint(-15, 15)
            img1 = img1.rotate(degree, expand=False)
        if self.transform is not None:
            img1 = self.transform(img1)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return [img1], label, [fn1]

    def __len__(self):
        return self.counter
class MyDataset_huoti_val_rectified(Dataset):
    """Validation dataset: squares the face rect, crops it, center-crops.

    Each line of ``conf.val_list`` carries image path(s) plus a 4-value face
    rect; the columns read depend on ``conf.eval.format``.  Samples whose
    rect contains -1 (no face detected) are skipped.  Items are returned as
    ``([img], [filename])`` -- no label, unlike the training dataset.
    """

    def __init__(self, conf, target_transform=None, loader=default_loader):
        # NOTE(review): the list file is never closed explicitly; it is
        # reclaimed only when `fh` is garbage-collected.
        fh = open(conf.val_list, 'r')
        imgs = []
        self.rects = []   # face rects, index-aligned with self.imgs
        self.counter = 0  # number of kept (detected) samples
        if conf.eval.format == 'rgb':
            for line in fh:
                data = line.strip().split()
                rgb_name = data[0]
                rect = [int(float(x)) for x in data[1:5]]
                # Skip entries with an invalid (-1) detection rect.
                if (np.array(rect) == -1).any():
                    continue
                self.counter += 1
                imgs.append(rgb_name)
                self.rects.append(rect)
        elif conf.eval.format == 'depth':
            for line in fh:
                data = line.strip().split()
                depth_name = data[5]
                rect = [int(float(x)) for x in data[6:10]]
                if (np.array(rect) == -1).any():
                    continue
                self.counter += 1
                imgs.append(depth_name)
                self.rects.append(rect)
        elif conf.eval.format == 'nir':
            for line in fh:
                data = line.strip().split()
                # nir_name = data[10]
                nir_name = data[6]
                # rect = [int(float(x)) for x in data[11:15]]
                rect = [int(float(x)) for x in data[7:11]]
                if (np.array(rect) == -1).any():
                    continue
                self.counter += 1
                imgs.append(nir_name)
                self.rects.append(rect)
        else:
            raise ValueError
        self.imgs = imgs
        self.transform = conf.eval.transform
        self.target_transform = target_transform
        self.loader = loader
        self.root = conf.huoti_folder
        self.input_size = conf.eval.input_size
        self.random_offset = conf.eval.random_offset
        self.expand_ratio = 1.2  # NOTE(review): not referenced in this class

    def __getitem__(self, index):
        # =========== rect00 ====================
        fn1 = self.imgs[index]
        img1 = self.loader(os.path.join(str(self.root), fn1))
        rect = self.rects[index]
        rect_w = rect[2] - rect[0]
        rect_h = rect[3] - rect[1]
        w, h = img1.size
        if rect_w < rect_h:
            # Grow the rect horizontally into a square of side rect_h,
            # keeping its horizontal center; pad with black where the new
            # rect falls outside the image.
            origin = rect[0] + rect[2]
            rect[0] = int(origin / 2 - rect_h / 2)
            rect[2] = int(origin / 2 + rect_h / 2)
            border_l = abs(rect[0]) if rect[0] < 0 else 0
            border_r = (rect[2] - w) if rect[2] > w else 0
            img1 = ImageOps.expand(img1, (border_l, 0, border_r, 0), 0)
            rect[0] = max(0, rect[0])
            rect[2] = rect[0] + rect_h
        else:
            # Grow the rect vertically into a square of side rect_w.
            origin = rect[1] + rect[3]
            rect[1] = int(origin / 2 - rect_w / 2)
            rect[3] = int(origin / 2 + rect_w / 2)
            border_t = abs(rect[1]) if rect[1] < 0 else 0
            border_b = (rect[3] - h) if rect[3] > h else 0
            img1 = ImageOps.expand(img1, (0, border_t, 0, border_b), 0)
            rect[1] = max(0, rect[1])
            rect[3] = rect[1] + rect_w
        img1 = img1.crop((rect[0], rect[1], rect[2], rect[3]))
        img1 = img1.resize((self.input_size[0] + self.random_offset[0], self.input_size[1] + self.random_offset[1]))
        # Deterministic center crop (the eval counterpart of training's
        # random crop).  NOTE(review): these are floats under true division;
        # PIL accepts float box coordinates -- confirm intended rounding.
        left = self.random_offset[0] / 2
        top = self.random_offset[1] / 2
        right = left + self.input_size[0]
        bottom = top + self.input_size[1]
        img1 = img1.crop((left, top, right, bottom))
        if self.transform is not None:
            img1 = self.transform(img1)
        return [img1], [fn1]

    def __len__(self):
        return self.counter
class MyDataset_huoti_test_rectified(Dataset):
    """Test dataset: squares the face rect, crops it, center-crops.

    NOTE(review): despite the name, this reads ``conf.val_list`` and its
    behavior matches the val variant in this file -- confirm intended.
    Samples whose rect contains -1 (no face detected) are skipped; items
    are returned as ``([img], [filename])`` with no label.
    """

    def __init__(self, conf, target_transform=None, loader=default_loader):
        # NOTE(review): the list file is never closed explicitly; it is
        # reclaimed only when `fh` is garbage-collected.
        fh = open(conf.val_list, 'r')
        imgs = []
        self.rects = []   # face rects, index-aligned with self.imgs
        self.counter = 0  # number of kept (detected) samples
        if conf.eval.format == 'rgb':
            for line in fh:
                data = line.strip().split()
                rgb_name = data[0]
                rect = [int(float(x)) for x in data[1:5]]
                # Skip entries with an invalid (-1) detection rect.
                if (np.array(rect) == -1).any():
                    continue
                self.counter += 1
                imgs.append(rgb_name)
                self.rects.append(rect)
        elif conf.eval.format == 'depth':
            for line in fh:
                data = line.strip().split()
                depth_name = data[5]
                rect = [int(float(x)) for x in data[6:10]]
                if (np.array(rect) == -1).any():
                    continue
                self.counter += 1
                imgs.append(depth_name)
                self.rects.append(rect)
        elif conf.eval.format == 'nir':
            for line in fh:
                data = line.strip().split()
                # nir_name = data[10]
                nir_name = data[6]
                # rect = [int(float(x)) for x in data[11:15]]
                rect = [int(float(x)) for x in data[7:11]]
                if (np.array(rect) == -1).any():
                    continue
                self.counter += 1
                imgs.append(nir_name)
                self.rects.append(rect)
        else:
            raise ValueError
        self.imgs = imgs
        self.transform = conf.eval.transform
        self.target_transform = target_transform
        self.loader = loader
        self.root = conf.huoti_folder
        self.input_size = conf.eval.input_size
        self.random_offset = conf.eval.random_offset
        self.expand_ratio = 1.2  # NOTE(review): not referenced in this class

    def __getitem__(self, index):
        # =========== rect00 ====================
        fn1 = self.imgs[index]
        img1 = self.loader(os.path.join(str(self.root), fn1))
        rect = self.rects[index]
        rect_w = rect[2] - rect[0]
        rect_h = rect[3] - rect[1]
        w, h = img1.size
        if rect_w < rect_h:
            # Grow the rect horizontally into a square of side rect_h,
            # keeping its horizontal center; pad with black where the new
            # rect falls outside the image.
            origin = rect[0] + rect[2]
            rect[0] = int(origin / 2 - rect_h / 2)
            rect[2] = int(origin / 2 + rect_h / 2)
            border_l = abs(rect[0]) if rect[0] < 0 else 0
            border_r = (rect[2] - w) if rect[2] > w else 0
            img1 = ImageOps.expand(img1, (border_l, 0, border_r, 0), 0)
            rect[0] = max(0, rect[0])
            rect[2] = rect[0] + rect_h
        else:
            # Grow the rect vertically into a square of side rect_w.
            origin = rect[1] + rect[3]
            rect[1] = int(origin / 2 - rect_w / 2)
            rect[3] = int(origin / 2 + rect_w / 2)
            border_t = abs(rect[1]) if rect[1] < 0 else 0
            border_b = (rect[3] - h) if rect[3] > h else 0
            img1 = ImageOps.expand(img1, (0, border_t, 0, border_b), 0)
            rect[1] = max(0, rect[1])
            rect[3] = rect[1] + rect_w
        img1 = img1.crop((rect[0], rect[1], rect[2], rect[3]))
        img1 = img1.resize((self.input_size[0] + self.random_offset[0], self.input_size[1] + self.random_offset[1]))
        # Deterministic center crop.  NOTE(review): floats under true
        # division; PIL accepts float box coordinates -- confirm rounding.
        left = self.random_offset[0] / 2
        top = self.random_offset[1] / 2
        right = left + self.input_size[0]
        bottom = top + self.input_size[1]
        img1 = img1.crop((left, top, right, bottom))
        if self.transform is not None:
            img1 = self.transform(img1)
        return [img1], [fn1]

    def __len__(self):
        return self.counter
| 36.182004
| 116
| 0.521336
| 2,394
| 17,693
| 3.675021
| 0.059315
| 0.030234
| 0.021823
| 0.029552
| 0.900546
| 0.885429
| 0.861218
| 0.855876
| 0.855876
| 0.82587
| 0
| 0.035677
| 0.347313
| 17,693
| 488
| 117
| 36.256148
| 0.726186
| 0.025151
| 0
| 0.831325
| 0
| 0
| 0.003482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043373
| false
| 0
| 0.014458
| 0.012048
| 0.101205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7b0347c53508b50b340135c3ea4bbd46505829ab
| 49
|
py
|
Python
|
instance/config.py
|
kuya-ui/News-API
|
bfc2cd8688c3c24cdc6f82e276b3849b50b0f005
|
[
"MIT"
] | null | null | null |
instance/config.py
|
kuya-ui/News-API
|
bfc2cd8688c3c24cdc6f82e276b3849b50b0f005
|
[
"MIT"
] | null | null | null |
instance/config.py
|
kuya-ui/News-API
|
bfc2cd8688c3c24cdc6f82e276b3849b50b0f005
|
[
"MIT"
] | null | null | null |
# SECURITY(review): hard-coded API key committed to source control -- move it
# to an environment variable or an untracked instance config, and rotate the
# exposed key.
NEWS_API_KEY = '8120360ba9e342dbaccb75e01109ca34'
| 49
| 49
| 0.897959
| 4
| 49
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.425532
| 0.040816
| 49
| 1
| 49
| 49
| 0.468085
| 0
| 0
| 0
| 0
| 0
| 0.64
| 0.64
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7b0fb4171a80ee792aea797e2f08c00229a88d4e
| 95
|
py
|
Python
|
Python/Topics/Functional decomposition/Full name/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 5
|
2020-08-29T15:15:31.000Z
|
2022-03-01T18:22:34.000Z
|
Python/Topics/Functional decomposition/Full name/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | null | null | null |
Python/Topics/Functional decomposition/Full name/main.py
|
drtierney/hyperskill-problems
|
b74da993f0ac7bcff1cbd5d89a3a1b06b05f33e0
|
[
"MIT"
] | 1
|
2020-12-02T11:13:14.000Z
|
2020-12-02T11:13:14.000Z
|
# Join a first and last name with a single space.
def create_full_name(name, last_name):
    return " ".join((name, last_name))
| 23.75
| 38
| 0.715789
| 14
| 95
| 4.571429
| 0.571429
| 0.25
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189474
| 95
| 3
| 39
| 31.666667
| 0.831169
| 0.2
| 0
| 0
| 0
| 0
| 0.013514
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
9e53c8a45feb86919198cc27596b841819f8ee68
| 68
|
py
|
Python
|
main/settings/base.py
|
triat/yogi-the-bot
|
01e2f17b9983fe2e9db68ea8cdd093b11ba5d588
|
[
"MIT"
] | 23
|
2019-03-20T15:36:23.000Z
|
2022-01-25T11:15:16.000Z
|
main/settings/base.py
|
triat/yogi-the-bot
|
01e2f17b9983fe2e9db68ea8cdd093b11ba5d588
|
[
"MIT"
] | 19
|
2019-04-02T05:19:35.000Z
|
2021-06-25T15:18:50.000Z
|
main/settings/base.py
|
triat/yogi-the-bot
|
01e2f17b9983fe2e9db68ea8cdd093b11ba5d588
|
[
"MIT"
] | 7
|
2019-04-07T23:18:18.000Z
|
2021-05-09T04:34:29.000Z
|
from main.settings.logs import *
from main.settings.common import *
| 22.666667
| 34
| 0.794118
| 10
| 68
| 5.4
| 0.6
| 0.296296
| 0.592593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 68
| 2
| 35
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9e954fa95e60e76b8ac4faa15f5f68a86b947303
| 34,195
|
py
|
Python
|
pkgs/ops-pkg/src/genie/libs/ops/igmp/nxos/tests/igmp_output.py
|
kecorbin/genielibs
|
5d3951b8911013691822e73e9c3d0f557ca10f43
|
[
"Apache-2.0"
] | null | null | null |
pkgs/ops-pkg/src/genie/libs/ops/igmp/nxos/tests/igmp_output.py
|
kecorbin/genielibs
|
5d3951b8911013691822e73e9c3d0f557ca10f43
|
[
"Apache-2.0"
] | null | null | null |
pkgs/ops-pkg/src/genie/libs/ops/igmp/nxos/tests/igmp_output.py
|
kecorbin/genielibs
|
5d3951b8911013691822e73e9c3d0f557ca10f43
|
[
"Apache-2.0"
] | null | null | null |
'''
Igmp Genie Ops Object Outputs for NXOS.
'''
class IgmpOutput(object):
ShowIpIgmpInterface = {
"vrfs": {
"default": {
"groups_count": 2,
"interface": {
"Ethernet2/2": {
"query_max_response_time": 10,
"vrf_name": "default",
"statistics": {
"general": {
"sent": {
"v2_reports": 0,
"v2_queries": 16,
"v2_leaves": 0
},
"received": {
"v2_reports": 0,
"v2_queries": 16,
"v2_leaves": 0
}
}
},
"configured_query_max_response_time": 10,
"pim_dr": True,
"vrf_id": 1,
"querier": "10.1.3.1",
"membership_count": 0,
"last_member": {
"query_count": 2,
"mrt": 1,
},
"startup_query": {
"interval": 31,
"configured_interval": 31,
"count": 2,
},
"link_status": "up",
"subnet": "10.1.3.0/24",
"address": "10.1.3.1",
"link_local_groups_reporting": False,
"unsolicited_report_interval": 10,
"enable_refcount": 1,
"enable": True,
"next_query_sent_in": "00:00:55",
"configured_query_interval": 125,
"old_membership_count": 0,
"group_timeout": 260,
"configured_robustness_variable": 2,
"vpc_svi": False,
"querier_version": 2,
"version": 2,
"query_interval": 125,
"querier_timeout": 255,
"immediate_leave": False,
"configured_group_timeout": 260,
"host_version": 2,
"configured_querier_timeout": 255,
"robustness_variable": 2,
"oper_status": "up"
},
"Ethernet2/1": {
"query_max_response_time": 15,
"vrf_name": "default",
"statistics": {
"errors": {
"router_alert_check": 19,
},
"general": {
"sent": {
"v2_reports": 0,
"v3_queries": 11,
"v2_leaves": 0,
"v3_reports": 56,
"v2_queries": 5
},
"received": {
"v2_reports": 0,
"v3_queries": 11,
"v2_leaves": 0,
"v3_reports": 56,
"v2_queries": 5
}
}
},
"configured_query_max_response_time": 15,
"max_groups": 10,
"vrf_id": 1,
"querier": "10.1.2.1",
"membership_count": 4,
"last_member": {
"query_count": 5,
"mrt": 1,
},
"startup_query": {
"interval": 33,
"configured_interval": 31,
"count": 5,
},
"pim_dr": True,
"link_status": "up",
"subnet": "10.1.2.0/24",
"address": "10.1.2.1",
"link_local_groups_reporting": False,
"unsolicited_report_interval": 10,
"enable_refcount": 9,
"enable": True,
"group_policy": "access-group-filter",
"next_query_sent_in": "00:00:47",
"configured_query_interval": 133,
"old_membership_count": 0,
"group_timeout": 680,
"configured_robustness_variable": 5,
"vpc_svi": False,
"querier_version": 3,
"available_groups": 10,
"version": 3,
"query_interval": 133,
"querier_timeout": 672,
"immediate_leave": True,
"configured_group_timeout": 260,
"host_version": 3,
"configured_querier_timeout": 255,
"robustness_variable": 5,
"oper_status": "up"
}
}
},
"VRF1": {
"groups_count": 2,
"interface": {
"Ethernet2/4": {
"query_max_response_time": 15,
"vrf_name": "VRF1",
"statistics": {
"general": {
"sent": {
"v2_reports": 0,
"v3_queries": 8,
"v2_leaves": 0,
"v3_reports": 44,
"v2_queries": 8
},
"received": {
"v2_reports": 0,
"v3_queries": 8,
"v2_leaves": 0,
"v3_reports": 44,
"v2_queries": 8
}
}
},
"configured_query_max_response_time": 15,
"max_groups": 10,
"vrf_id": 3,
"querier": "20.1.2.1",
"membership_count": 4,
"last_member": {
"query_count": 5,
"mrt": 1,
},
"startup_query": {
"interval": 33,
"configured_interval": 31,
"count": 5,
},
"pim_dr": True,
"link_status": "up",
"subnet": "20.1.2.0/24",
"address": "20.1.2.1",
"link_local_groups_reporting": False,
"unsolicited_report_interval": 10,
"enable_refcount": 9,
"enable": True,
"group_policy": "access-group-filter",
"next_query_sent_in": "00:00:06",
"configured_query_interval": 133,
"old_membership_count": 0,
"group_timeout": 680,
"configured_robustness_variable": 5,
"vpc_svi": False,
"querier_version": 3,
"available_groups": 10,
"version": 3,
"query_interval": 133,
"querier_timeout": 672,
"immediate_leave": True,
"configured_group_timeout": 260,
"host_version": 3,
"configured_querier_timeout": 255,
"robustness_variable": 5,
"oper_status": "up"
},
"Ethernet2/3": {
"query_max_response_time": 10,
"vrf_name": "VRF1",
"statistics": {
"general": {
"sent": {
"v2_reports": 0,
"v2_queries": 16,
"v2_leaves": 0
},
"received": {
"v2_reports": 0,
"v2_queries": 16,
"v2_leaves": 0
}
}
},
"configured_query_max_response_time": 10,
"pim_dr": True,
"vrf_id": 3,
"querier": "20.1.3.1",
"membership_count": 0,
"last_member": {
"query_count": 2,
"mrt": 1,
},
"startup_query": {
"interval": 31,
"configured_interval": 31,
"count": 2,
},
"link_status": "up",
"subnet": "20.1.3.0/24",
"address": "20.1.3.1",
"link_local_groups_reporting": False,
"unsolicited_report_interval": 10,
"enable_refcount": 1,
"enable": True,
"next_query_sent_in": "00:00:47",
"configured_query_interval": 125,
"old_membership_count": 0,
"group_timeout": 260,
"configured_robustness_variable": 2,
"vpc_svi": False,
"querier_version": 2,
"version": 2,
"query_interval": 125,
"querier_timeout": 255,
"immediate_leave": False,
"configured_group_timeout": 260,
"host_version": 2,
"configured_querier_timeout": 255,
"robustness_variable": 2,
"oper_status": "up"
}
}
},
"tenant1": {
"groups_count": 0,
},
"manegement": {
"groups_count": 0,
}
}
}
ShowIpIgmpGroups = {
"vrfs": {
"VRF1": {
"interface": {
"Ethernet2/4": {
"group": {
"239.6.6.6": {
"expire": "never",
"type": "S",
"last_reporter": "20.1.2.1",
"up_time": "00:15:27"
},
"239.8.8.8": {
"source": {
"2.2.2.2": {
"expire": "never",
"type": "S",
"last_reporter": "20.1.2.1",
"up_time": "00:15:27"
}
},
},
"239.5.5.5": {
"expire": "never",
"type": "S",
"last_reporter": "20.1.2.1",
"up_time": "00:15:27"
},
"239.7.7.7": {
"source": {
"2.2.2.1": {
"expire": "never",
"type": "S",
"last_reporter": "20.1.2.1",
"up_time": "00:15:27"
}
},
}
}
}
},
"total_entries": 4
},
"default": {
"interface": {
"Ethernet2/1": {
"group": {
"239.6.6.6": {
"expire": "never",
"type": "S",
"last_reporter": "10.1.2.1",
"up_time": "00:20:53"
},
"239.8.8.8": {
"source": {
"2.2.2.2": {
"expire": "never",
"type": "S",
"last_reporter": "10.1.2.1",
"up_time": "00:20:34"
}
},
},
"239.5.5.5": {
"expire": "never",
"type": "S",
"last_reporter": "10.1.2.1",
"up_time": "00:21:00"
},
"239.7.7.7": {
"source": {
"2.2.2.1": {
"expire": "never",
"type": "S",
"last_reporter": "10.1.2.1",
"up_time": "00:20:42"
}
},
}
}
}
},
"total_entries": 4
}
}
}
ShowIpIgmpLocalGroups = {
"vrfs": {
"default": {
"interface": {
"Ethernet2/1": {
"join_group": {
"239.1.1.1 *": {
"source": "*",
"group": "239.1.1.1"
},
"239.3.3.3 1.1.1.1": {
"source": "1.1.1.1",
"group": "239.3.3.3"
},
"239.2.2.2 *": {
"source": "*",
"group": "239.2.2.2"
},
"239.4.4.4 1.1.1.2": {
"source": "1.1.1.2",
"group": "239.4.4.4"
}
},
"static_group": {
"239.5.5.5 *": {
"source": "*",
"group": "239.5.5.5"
},
"239.8.8.8 2.2.2.2": {
"source": "2.2.2.2",
"group": "239.8.8.8"
},
"239.6.6.6 *": {
"source": "*",
"group": "239.6.6.6"
},
"239.7.7.7 2.2.2.1": {
"source": "2.2.2.1",
"group": "239.7.7.7"
}
},
"group": {
"239.1.1.1": {
"last_reporter": "00:00:13",
"type": "local"
},
"239.8.8.8": {
"source": {
"2.2.2.2": {
"last_reporter": "01:06:47",
"type": "static"
}
},
},
"239.2.2.2": {
"last_reporter": "00:00:18",
"type": "local"
},
"239.4.4.4": {
"source": {
"1.1.1.2": {
"last_reporter": "00:00:06",
"type": "local"
}
},
},
"239.6.6.6": {
"last_reporter": "01:06:47",
"type": "static"
},
"239.5.5.5": {
"last_reporter": "01:06:47",
"type": "static"
},
"239.3.3.3": {
"source": {
"1.1.1.1": {
"last_reporter": "00:00:11",
"type": "local"
}
},
},
"239.7.7.7": {
"source": {
"2.2.2.1": {
"last_reporter": "01:06:47",
"type": "static"
}
},
}
}
}
}
},
"VRF1": {
"interface": {
"Ethernet2/4": {
"join_group": {
"239.1.1.1 *": {
"source": "*",
"group": "239.1.1.1"
},
"239.3.3.3 1.1.1.1": {
"source": "1.1.1.1",
"group": "239.3.3.3"
},
"239.2.2.2 *": {
"source": "*",
"group": "239.2.2.2"
},
"239.4.4.4 1.1.1.2": {
"source": "1.1.1.2",
"group": "239.4.4.4"
}
},
"static_group": {
"239.5.5.5 *": {
"source": "*",
"group": "239.5.5.5"
},
"239.8.8.8 2.2.2.2": {
"source": "2.2.2.2",
"group": "239.8.8.8"
},
"239.6.6.6 *": {
"source": "*",
"group": "239.6.6.6"
},
"239.7.7.7 2.2.2.1": {
"source": "2.2.2.1",
"group": "239.7.7.7"
}
},
"group": {
"239.1.1.1": {
"last_reporter": "00:00:50",
"type": "local"
},
"239.8.8.8": {
"source": {
"2.2.2.2": {
"last_reporter": "01:06:47",
"type": "static"
}
},
},
"239.2.2.2": {
"last_reporter": "00:00:54",
"type": "local"
},
"239.4.4.4": {
"source": {
"1.1.1.2": {
"last_reporter": "00:00:55",
"type": "local"
}
},
},
"239.6.6.6": {
"last_reporter": "01:06:47",
"type": "static"
},
"239.5.5.5": {
"last_reporter": "01:06:47",
"type": "static"
},
"239.3.3.3": {
"source": {
"1.1.1.1": {
"last_reporter": "00:01:01",
"type": "local"
}
},
},
"239.7.7.7": {
"source": {
"2.2.2.1": {
"last_reporter": "01:06:47",
"type": "static"
}
},
}}}}}}
}
Igmp_info = {
"vrfs": {
"VRF1": {
"interfaces": {
"Ethernet2/4": {
"querier": "20.1.2.1",
"group_policy": "access-group-filter",
"robustness_variable": 5,
"join_group": {
"239.3.3.3 1.1.1.1": {
"source": "1.1.1.1",
"group": "239.3.3.3"
},
"239.4.4.4 1.1.1.2": {
"source": "1.1.1.2",
"group": "239.4.4.4"
},
"239.1.1.1 *": {
"source": "*",
"group": "239.1.1.1"
},
"239.2.2.2 *": {
"source": "*",
"group": "239.2.2.2"
}
},
"immediate_leave": True,
"max_groups": 10,
"enable": True,
"version": 3,
"oper_status": "up",
"group": {
"239.5.5.5": {
"up_time": "00:15:27",
"last_reporter": "20.1.2.1",
"expire": "never"
},
"239.6.6.6": {
"up_time": "00:15:27",
"last_reporter": "20.1.2.1",
"expire": "never"
},
"239.8.8.8": {
"source": {
"2.2.2.2": {
"last_reporter": "20.1.2.1",
"up_time": "00:15:27",
"expire": "never"
}
}
},
"239.7.7.7": {
"source": {
"2.2.2.1": {
"last_reporter": "20.1.2.1",
"up_time": "00:15:27",
"expire": "never"
}
}
}
},
"static_group": {
"239.7.7.7 2.2.2.1": {
"source": "2.2.2.1",
"group": "239.7.7.7"
},
"239.5.5.5 *": {
"source": "*",
"group": "239.5.5.5"
},
"239.6.6.6 *": {
"source": "*",
"group": "239.6.6.6"
},
"239.8.8.8 2.2.2.2": {
"source": "2.2.2.2",
"group": "239.8.8.8"
}
},
"query_max_response_time": 15,
"query_interval": 133
},
"Ethernet2/3": {
"querier": "20.1.3.1",
"immediate_leave": False,
"enable": True,
"version": 2,
"oper_status": "up",
"query_max_response_time": 10,
"robustness_variable": 2,
"query_interval": 125
}
},
"groups_count": 2
},
"manegement": {
"groups_count": 0
},
"tenant1": {
"groups_count": 0
},
"default": {
"interfaces": {
"Ethernet2/2": {
"querier": "10.1.3.1",
"immediate_leave": False,
"enable": True,
"version": 2,
"oper_status": "up",
"query_max_response_time": 10,
"robustness_variable": 2,
"query_interval": 125
},
"Ethernet2/1": {
"querier": "10.1.2.1",
"group_policy": "access-group-filter",
"robustness_variable": 5,
"join_group": {
"239.3.3.3 1.1.1.1": {
"source": "1.1.1.1",
"group": "239.3.3.3"
},
"239.4.4.4 1.1.1.2": {
"source": "1.1.1.2",
"group": "239.4.4.4"
},
"239.1.1.1 *": {
"source": "*",
"group": "239.1.1.1"
},
"239.2.2.2 *": {
"source": "*",
"group": "239.2.2.2"
}
},
"immediate_leave": True,
"max_groups": 10,
"enable": True,
"version": 3,
"oper_status": "up",
"group": {
"239.5.5.5": {
"up_time": "00:21:00",
"last_reporter": "10.1.2.1",
"expire": "never"
},
"239.6.6.6": {
"up_time": "00:20:53",
"last_reporter": "10.1.2.1",
"expire": "never"
},
"239.8.8.8": {
"source": {
"2.2.2.2": {
"last_reporter": "10.1.2.1",
"up_time": "00:20:34",
"expire": "never"
}
}
},
"239.7.7.7": {
"source": {
"2.2.2.1": {
"last_reporter": "10.1.2.1",
"up_time": "00:20:42",
"expire": "never"
}
}
}
},
"static_group": {
"239.7.7.7 2.2.2.1": {
"source": "2.2.2.1",
"group": "239.7.7.7"
},
"239.5.5.5 *": {
"source": "*",
"group": "239.5.5.5"
},
"239.6.6.6 *": {
"source": "*",
"group": "239.6.6.6"
},
"239.8.8.8 2.2.2.2": {
"source": "2.2.2.2",
"group": "239.8.8.8"
}
},
"query_max_response_time": 15,
"query_interval": 133
}
},
"groups_count": 2
}
}
}
| 46.52381
| 75
| 0.198099
| 1,878
| 34,195
| 3.439297
| 0.072417
| 0.027868
| 0.024152
| 0.027868
| 0.91717
| 0.894875
| 0.881251
| 0.863446
| 0.863446
| 0.852454
| 0
| 0.143864
| 0.701184
| 34,195
| 734
| 76
| 46.587193
| 0.488256
| 0.001141
| 0
| 0.719225
| 0
| 0
| 0.204
| 0.027997
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0.006916
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
7b8e657fee5326c973f54210b84099cc69dbab6c
| 199
|
py
|
Python
|
rl/environments/__init__.py
|
RamiSketcher/AMMI-RL
|
6d51587ff4d5dc14cba87fca561bd7b340b44586
|
[
"MIT"
] | null | null | null |
rl/environments/__init__.py
|
RamiSketcher/AMMI-RL
|
6d51587ff4d5dc14cba87fca561bd7b340b44586
|
[
"MIT"
] | null | null | null |
rl/environments/__init__.py
|
RamiSketcher/AMMI-RL
|
6d51587ff4d5dc14cba87fca561bd7b340b44586
|
[
"MIT"
] | 2
|
2021-09-24T22:51:42.000Z
|
2021-11-14T16:43:17.000Z
|
# Import all special environmnets
# PDDM environments
from rl.environments.pddm_envs.gym_env import GymEnv
# MBPO environments
import rl.environments.mbpo.env
# import rl.environments.mbpo.static
| 19.9
| 52
| 0.819095
| 27
| 199
| 5.962963
| 0.518519
| 0.26087
| 0.248447
| 0.298137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115578
| 199
| 9
| 53
| 22.111111
| 0.914773
| 0.512563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7bcf8c79ec5a65e697cd4822b204302012d38df4
| 3,231
|
py
|
Python
|
graphics.py
|
220111/Sam-s-Quest
|
2619ae08687ca60ef9b3e3571223946e48600fcb
|
[
"Apache-2.0"
] | null | null | null |
graphics.py
|
220111/Sam-s-Quest
|
2619ae08687ca60ef9b3e3571223946e48600fcb
|
[
"Apache-2.0"
] | null | null | null |
graphics.py
|
220111/Sam-s-Quest
|
2619ae08687ca60ef9b3e3571223946e48600fcb
|
[
"Apache-2.0"
] | 1
|
2019-10-15T16:31:46.000Z
|
2019-10-15T16:31:46.000Z
|
#this it the graphics file to Sam's Battle Simulator
#Copyright 2018 Henry Morin
def title():
print("""
_______ _______ _______ _ _______ ______ _________________________ _______ _______________________
( ____ ( ___ | | | ____ \ ( ___ \( ___ )__ __|__ __( \ ( ____ \ ( ____ \__ __( )
| ( \/ ( ) | () () |/| ( \/ | ( ) ) ( ) | ) ( ) ( | ( | ( \/ | ( \/ ) ( | () () |
| (_____| (___) | || || | | (_____ | (__/ /| (___) | | | | | | | | (__ | (_____ | | | || || |
(_____ ) ___ | |(_)| | (_____ ) | __ ( | ___ | | | | | | | | __) (_____ ) | | | |(_)| |
) | ( ) | | | | ) | | ( \ \| ( ) | | | | | | | | ( ) | | | | | | |
/\____) | ) ( | ) ( | /\____) | | )___) ) ) ( | | | | | | (____/\ (____/\ /\____) |__) (__| ) ( |
\_______)/ \|/ \| \_______) |/ \___/|/ \| )_( )_( (_______(_______/ \_______)_______// \|
""")
def monimg():
print("""
|
\_ /; _.._
`\~--.._ //' ,(+=\\\\
`//////\ \\/;' /~ (\\\\
~/////\~\`)' /; ))))
`~' | ((`~/((((\
;'_\'\ /')) )))))
/~/ '" "' _. /'/\_ /^\`((( \
`\/' _.-~/--/ ( =( | , |
_/~\_)_}___/^\/~`\.__\|==|
/uUUU) ) | |
( / | _-=o|\__ /'/~ \
' /' | /(((((\`\( |~\/
/' | /' )))))"`\`\|/_/---.._,$$,
.,ssS$$$Sss|._/_..-(((' )\)>>> ~\$
,sS$$$$$$$$$$$|$$$$$$$ |/ //'~`o `\
,$$$$$$$$$$$$$$|$$S$$$$' ( / \
,$$$$$$$$$$$$S$$|$$$$$$$' | / ,s$$$
s$$$$$S$$$$$$$$$S|$$$$$$$$ | / $$$$$$
_~,$S""'' ``"S|$$S$$$$$" (_,`\, ,$$$$$$$;
/~ ,"' / 'S$$$$$" \_./| s$$$$$$$$$$
(~' _, \==~~) / "'' \ | $$$$$$$$$$$$
(0\ /0/ \-' /' \ | | ,$$$$$$$$$$$$$,
`/' ' _-~ |= \_-\ $$$$$$$$$$$$$$s
(~~~) _.-~_- \ \ ,s|= | `"$$$$$$$$$$$$$$$
( `-' )/>-~ _/-__ | |,$$$|_/, `"$$$$$$$$$$$$
/V^^^^V~/' _/~/~~ ~~-| |$$$$$$$$ "$$$$$$$$$$,
/ (^^^^),/' /' ) /S$$$$$$$; ,$$$$$$$$$$$,
,$$_ `~~~'.,/' / _-ss, /(/-(/-(/' ,s$$$$$$$$$$$$$
,s$$$$$ssSS$$$' ,$'.s$$$$$$$$' (/-(/-(/-(/-(/'
S$$$$$$$$$$$$$$ ,$$$$$$$$$$$$$'
(/-(/-(/-(/-(/' _s$$$$$$$$$$$$$$
(/-(/-(/-(/-(/-'
""")
| 44.875
| 113
| 0.132157
| 51
| 3,231
| 2.647059
| 0.490196
| 0.207407
| 0.222222
| 0.237037
| 0.081481
| 0.081481
| 0.081481
| 0.081481
| 0.081481
| 0.081481
| 0
| 0.004076
| 0.544413
| 3,231
| 71
| 114
| 45.507042
| 0.087636
| 0.024451
| 0
| 0.085106
| 0
| 0.148936
| 0.976752
| 0.069427
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| true
| 0
| 0
| 0
| 0.042553
| 0.042553
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c8e24cb3030d1a046a139dd298bd79b5b136abff
| 14,319
|
py
|
Python
|
rlcarsim.py
|
abhijitmajumdar/Reinforcement_Learning_Car_Simulator
|
42c56eb7eabaaafadf6b601ffa54fdf229882da0
|
[
"MIT"
] | 13
|
2018-06-26T08:28:52.000Z
|
2021-04-13T14:27:19.000Z
|
rlcarsim.py
|
abhijitmajumdar/Reinforcement_Learning_Car_Simulator
|
42c56eb7eabaaafadf6b601ffa54fdf229882da0
|
[
"MIT"
] | null | null | null |
rlcarsim.py
|
abhijitmajumdar/Reinforcement_Learning_Car_Simulator
|
42c56eb7eabaaafadf6b601ffa54fdf229882da0
|
[
"MIT"
] | 2
|
2018-03-05T22:32:17.000Z
|
2018-03-06T00:12:13.000Z
|
from Simulator import Environment,GUI,RL,Utils
# User controls the movement of all cars simultaneously, using 'w','a','s','d' for forward, left, reverse and right.
# Use 'q' to quit and 'r' to reset and randomize agents
# Can be used to sense how the system works(sensor readings, collisions and car score)
def user_control(config_file,arena_select=None,continuous_control=False):
    """Drive all configured cars by keyboard.

    'w'/'s'/'a'/'d' command forward/reverse/left/right, 'r' resets and
    randomizes the agents, 'q' quits.  Useful to get a feel for the sensor
    readings, collisions and scoring of the simulator.
    """
    rl_params, car_definitions, env_definition = Utils.configurator(config_file)
    # A command-line arena choice overrides the one from the config file.
    if arena_select is None:
        arena_select = env_definition['arena_select']
    cars = [Environment.Car(definition) for definition in car_definitions]
    env = Environment.Environment(env_definition, arena_select=arena_select)
    gui = GUI.GUI(env_definition, arena_select, car_definitions,
                  ['Average loss', 'Total reward', 'Running reward'], trace=True)
    env.check_agent_connections(*cars)
    env.randomize(rl_params['random_agent_position'], rl_params['random_destination_position'], *cars)
    env.compute_interaction(*cars)  # ensure sensor/collision values are valid before drawing
    gui.init_destination(False, *cars)
    # Key -> [velocity, steering]: small increments in continuous mode,
    # absolute setpoints otherwise (then each input is held for 10 sim steps).
    if continuous_control:
        commands = {'w': [0.02, 0.0], 's': [-0.02, 0.0], 'a': [0.0, 0.04], 'd': [0.0, -0.04]}
        steps_per_input = 1
    else:
        commands = {'w': [0.5, 0.0], 's': [-0.5, 0.0], 'a': [0.5, 0.6], 'd': [0.5, -0.6]}
        steps_per_input = 10
    help_text = 'User commands\n' + '\n'.join([str(key) + ': ' + str(commands[key]) for key in commands]) + '\n'
    help_text += 'r: reset\nq: quit\n\n'
    def relocate_destination():
        # Move every car's destination to the last mouse click, if any.
        gui.init_destination(True, *cars)
        if gui.mouse_click_loaction[0] is not None:
            for car in cars:
                env.change_destination(car, float(gui.mouse_click_loaction[0]), float(gui.mouse_click_loaction[1]))
            gui.mouse_click_loaction = [None, None]
    while True:
        for _ in range(steps_per_input):
            debug_lines = ''
            for idx, car in enumerate(cars):
                # Terminated cars only report their state; they are not stepped.
                if car.physical_state in ('collided', 'destination'):
                    debug_lines += 'Car ' + str(idx) + '\n' + car.physical_state + '!\n\n'
                    continue
                car.update(env_definition['dt'])
                readings = car.get_sensor_reading()
                gui.update(idx, car.get_state())
                partial = car.get_partial_state()
                debug_lines += ('Car ' + str(idx)
                                + '\nSensor readings:' + ', '.join(['{:.2f}'.format(x) for x in readings])
                                + '\nPartial state=' + ', '.join(['{:.2f}'.format(y) for y in partial])
                                + '\n')
            env.compute_interaction(*cars)
            gui.update_debug_info(help_text + debug_lines)
            relocate_destination()
            gui.refresh()
        user_ip = gui.get_userinput()
        if user_ip == 'q':
            break
        if user_ip == 'r':
            # Close every trace line, reset the cars and scatter them again.
            for idx, car in enumerate(cars):
                gui.update(idx, car.get_state(), draw_car=False, force_end_line=True)
                car.reset()
            env.randomize(True, True, *cars)
        velocity, steering = commands[user_ip] if user_ip in commands else [0, 0]
        for car in cars:
            if continuous_control:
                car.increment_velocity(velocity)
                car.increment_steering(steering)
            else:
                car.set_velocity(velocity)
                car.set_steering(steering)
def rl_control_dqn(config_file,arena_select=None,load_weights=None,testing=False):
    """Train or test a single DQN-controlled car with live GUI feedback.

    Only the first car of the configuration is used.  The GUI's run-select
    button toggles between 'learn' and 'test' at runtime; pressing 'q'
    quits.  NOTE: this file uses Python 2 ``print`` statements.
    """
    # Start in test mode only when explicitly requested.
    run='test' if testing is True else 'learn'
    rl_params,car_definitions,env_definition = Utils.configurator(config_file)
    if arena_select is None: arena_select=env_definition['arena_select'] # Override config arena select if specified in command line arguments
    cars = [Environment.Car(car) for car in car_definitions]
    car = cars[0]
    env = Environment.Environment(env_definition,arena_select=arena_select)
    gui = GUI.GUI(env_definition,arena_select,car_definitions,env_definition['graphs'],trace=True)
    env.check_agent_connections(car)
    env.compute_interaction(car) # Necessary to ensure valid values
    gui.init_destination(False,car)
    rl = RL.DQN(rl_params, testing=testing, sample_state=car.get_partial_state(),load_weights=load_weights)
    def initialize(run_state):
        # Reset the car and reconfigure env/GUI for the requested mode.
        car.reset()
        env.compute_interaction(car)
        car.get_sensor_reading()
        if run_state=='test':
            env.randomize(rl_params['random_agent_position'],rl_params['random_destination_position'],car)
            env.set_max_steps(2*env_definition['max_steps']) # test runs get twice the step budget
            gui.enable_trace(remove_traces=True)
            gui.set_run_select(gui.runs[1])
            gui.update_debug_info('[Testing]\n'+'Currently learned weights loaded')
        else:
            env.randomize(rl_params['random_agent_position'],rl_params['random_destination_position'],car)
            env.set_max_steps(env_definition['max_steps'])
            gui.enable_trace(remove_traces=True)
            gui.set_run_select(gui.runs[0])
            gui.update_debug_info('[Training]\n')
        env.compute_interaction(car)
        rl.init_state_buffer(env,env_definition['dt'],car) # Necessary because the simulator computes agent history, even when it is disabled (when the history is set to 1)
    def check_run_button(current_state):
        # Switch mode when the GUI run-select button disagrees with the current state.
        if gui.get_run_select()==gui.runs[0] and current_state=='test':
            print '\n\n\nLearning\n'
            initialize(run_state='learn')
            return 'learn'
        elif gui.get_run_select()==gui.runs[1] and current_state=='learn':
            print '\n\n\nTesting\n'
            initialize(run_state='test')
            return 'test'
        else:
            return current_state
    def change_destination():
        # Move the car's destination to the last mouse click, if any.
        gui.init_destination(True,car)
        if gui.mouse_click_loaction[0] is not None:
            env.change_destination(car,float(gui.mouse_click_loaction[0]),float(gui.mouse_click_loaction[1]))
            gui.mouse_click_loaction = [None,None]
    initialize(run_state=run)
    while(1):
        run = check_run_button(current_state=run)
        change_destination()
        if gui.get_userinput()=='q': break
        if run=='test':
            terminals,terminal_states,physical_states = rl.run_step(env,env_definition['dt'],car,True)
            for i,term in enumerate(terminals):
                # Close the trace line of each agent that reached a terminal state.
                gui.update(term,terminal_states[i],draw_car=False,force_end_line=True)
                print 'Car',i,':',physical_states[i]
            gui.update(0,car.get_state())
            gui.refresh()
        else:
            terminals,terminal_states,physical_states,debug,log = rl.learn_step(env,env_definition['dt'],car)
            if debug is not None:
                # A non-None debug payload carries a per-epoch log entry; refresh text and graphs.
                gui.update_debug_info(debug)
                gui.update_graph(log['epoch'],log['avg_loss'],env_definition['graphs'][0])
                gui.update_graph(log['epoch'],log['total_reward'],env_definition['graphs'][1])
                gui.update_graph(log['epoch'],log['running_reward'],env_definition['graphs'][2])
            for i,term in enumerate(terminals):
                gui.update(term,terminal_states[i],draw_car=False,force_end_line=True)
            show_car = (car.epoch%100==0) # only draw the car itself every 100th epoch
            gui.update(0,car.get_state(),draw_car=show_car)
            if show_car==True or len(terminals)>0: gui.refresh()
def rl_control_mvedql(config_file,arena_select=None,load_weights=None,testing=False):
    """Train or test all configured cars with the multi-vehicle MVEDQL agent.

    Same GUI workflow as :func:`rl_control_dqn`, but every car from the
    configuration participates and ``RL.MVEDQL`` drives the learning.
    The GUI's run-select button toggles between 'learn' and 'test'; 'q'
    quits.  NOTE: this file uses Python 2 ``print`` statements.
    """
    # Start in test mode only when explicitly requested.
    run='test' if testing is True else 'learn'
    rl_params,car_definitions,env_definition = Utils.configurator(config_file)
    if arena_select is None: arena_select=env_definition['arena_select'] # Override config arena select if specified in command line arguments
    cars = [Environment.Car(car) for car in car_definitions]
    env = Environment.Environment(env_definition,arena_select=arena_select)
    gui = GUI.GUI(env_definition,arena_select,car_definitions,env_definition['graphs'],trace=True)
    env.check_agent_connections(*cars)
    env.compute_interaction(*cars) # Necessary to ensure valid values
    gui.init_destination(False,*cars)
    rl = RL.MVEDQL(rl_params, testing=testing, sample_state=cars[0].get_partial_state(),load_weights=load_weights)
    def initialize(run_state):
        # Reset every car and reconfigure env/GUI for the requested mode.
        for car in cars: car.reset()
        env.compute_interaction(*cars)
        for car in cars: car.get_sensor_reading()
        if run_state=='test':
            env.randomize(rl_params['random_agent_position'],rl_params['random_destination_position'],*cars)
            env.set_max_steps(2*env_definition['max_steps']) # test runs get twice the step budget
            gui.enable_trace(remove_traces=True)
            gui.set_run_select(gui.runs[1])
            gui.update_debug_info('[Testing]\n'+'Currently learned weights loaded')
        else:
            env.randomize(rl_params['random_agent_position'],rl_params['random_destination_position'],*cars)
            env.set_max_steps(env_definition['max_steps'])
            gui.enable_trace(remove_traces=True)
            gui.set_run_select(gui.runs[0])
            gui.update_debug_info('[Training]\n')
        env.compute_interaction(*cars)
        rl.init_state_buffer(env,env_definition['dt'],None,*cars) # Necessary because the simulator computes agent history, even when it is disabled (when the history is set to 1)
    def check_run_button(current_state):
        # Switch mode when the GUI run-select button disagrees with the current state.
        if gui.get_run_select()==gui.runs[0] and current_state=='test':
            print '\n\n\nLearning\n'
            initialize(run_state='learn')
            return 'learn'
        elif gui.get_run_select()==gui.runs[1] and current_state=='learn':
            print '\n\n\nTesting\n'
            initialize(run_state='test')
            return 'test'
        else:
            return current_state
    def change_destination():
        # Move every car's destination to the last mouse click, if any.
        gui.init_destination(True,*cars)
        if gui.mouse_click_loaction[0] is not None:
            for car in cars:
                env.change_destination(car,float(gui.mouse_click_loaction[0]),float(gui.mouse_click_loaction[1]))
            gui.mouse_click_loaction = [None,None]
    initialize(run_state=run)
    while(1):
        run = check_run_button(current_state=run)
        change_destination()
        if gui.get_userinput()=='q': break
        if run=='test':
            terminals,terminal_states,physical_states = rl.run_step(env,env_definition['dt'],True,*cars)
            for i,term in enumerate(terminals):
                # Close the trace line of each agent that reached a terminal state.
                gui.update(term,terminal_states[i],draw_car=False,force_end_line=True)
                print 'Car',i,':',physical_states[i]
            for i in range(len(cars)): gui.update(i,cars[i].get_state())
            gui.refresh()
        else:
            terminals,terminal_states,physical_states,debug,log = rl.learn_step(env,env_definition['dt'],*cars)
            if debug is not None:
                # A non-None debug payload carries a per-epoch log entry; refresh text and graphs.
                gui.update_debug_info(debug)
                gui.update_graph(log['epoch'],log['avg_loss'],env_definition['graphs'][0])
                gui.update_graph(log['epoch'],log['total_reward'],env_definition['graphs'][1])
                gui.update_graph(log['epoch'],log['running_reward'],env_definition['graphs'][2])
            for i,term in enumerate(terminals):
                gui.update(term,terminal_states[i],draw_car=False,force_end_line=True)
            show_car = (cars[0].epoch%100==0) # only draw the cars every 100th epoch
            for i in range(len(cars)): gui.update(i,cars[i].get_state(),draw_car=show_car)
            if show_car==True or len(terminals)>0: gui.refresh()
def checkpoint_run(config_file,arena_select=None,load_weights=None):
    """Replay a trained DQN agent through a fixed list of checkpoints.

    The first configured car is driven by the loaded policy from one
    hard-coded destination to the next until every checkpoint is visited
    or the user presses 'q' in the GUI.

    Args:
        config_file: configuration path handed to ``Utils.configurator``.
        arena_select: optional arena name; overrides the config value.
        load_weights: weights file for ``RL.DQN``; required.

    Raises:
        Exception: when ``load_weights`` is not provided.
    """
    if load_weights is None:
        # Fixed typo in the original message ('sepcified').
        raise Exception('To run checkpoint, weights need to be specified using load_weights')
    # Checkpoint coordinates, visited strictly in this order.
    dests = [(24,2),(27,5),(16.5,1.5),(18,8.7),(16.4,4.4),(19,4),(26.5,5),(19,5.2)]
    rl_params,car_definitions,env_definition = Utils.configurator(config_file)
    if arena_select is None:
        arena_select = env_definition['arena_select']  # CLI arena choice overrides the config file
    cars = [Environment.Car(car) for car in car_definitions]
    car = cars[0]  # only the first configured car is driven
    env = Environment.Environment(env_definition,arena_select=arena_select)
    gui = GUI.GUI(env_definition,arena_select,car_definitions,env_definition['graphs'],trace=True)
    env.check_agent_connections(car)
    env.compute_interaction(car)  # ensure sensor/collision values are valid before drawing
    gui.init_destination(False,car)
    rl = RL.DQN(rl_params, testing=True, sample_state=car.get_partial_state(),load_weights=load_weights)
    # Initialize: generous step budget, clean traces, primed state buffer.
    env.set_max_steps(2*env_definition['max_steps'])
    gui.enable_trace(remove_traces=True)
    rl.init_state_buffer(env,env_definition['dt'],car)  # the simulator keeps car history even when it is disabled (history size 1)
    # Draw numbered checkpoint markers and the car's start pose.
    for idx,pt in enumerate(dests):
        gui.create_marker(pt,'x',0.15)
        gui.create_label(pt,str(idx+1))
    d_idx = 0
    car.set_destination(dests[d_idx])
    gui.create_marker((car.x,car.y),'o',0.1)
    gui.create_marker((car.x,car.y),'arrow',0.5,car.omega)
    while d_idx < len(dests):
        if gui.get_userinput()=='q':
            break
        # The step's return values (terminals/states) are not needed here.
        rl.run_step(env,env_definition['dt'],car,reset=False)
        if car.physical_state in ('collided','destination','timeup'):
            if car.physical_state=='collided':
                # NOTE(review): the 'collided' state is never cleared here, so this
                # branch keeps re-drawing the crashed car forever -- confirm intended.
                gui.update(0,car.get_state(),draw_car=False,force_end_line=True)
                continue
            # Reached a checkpoint (or timed out): advance to the next one.
            d_idx += 1
            if d_idx >= len(dests):
                break
            car.set_destination(dests[d_idx])
            car.physical_state = 'running'
            env.compute_interaction(car)
            gui.sleep(2)
            gui.init_destination(True,car)
        gui.update(0,car.get_state())
        gui.refresh()
if __name__ == '__main__':
    args = Utils.parse_args()
    # Map each --control mode onto its runner.  An unrecognised mode does
    # nothing, exactly like the original if/elif chain.
    dispatch = {
        'user': lambda: user_control(config_file=args.config, arena_select=args.arena, continuous_control=args.cts),
        'dqn': lambda: rl_control_dqn(config_file=args.config, arena_select=args.arena, load_weights=args.load_weights, testing=args.test),
        'mvedql': lambda: rl_control_mvedql(config_file=args.config, arena_select=args.arena, load_weights=args.load_weights, testing=args.test),
        'checkpoint': lambda: checkpoint_run(config_file=args.config, arena_select=args.arena, load_weights=args.load_weights),
    }
    runner = dispatch.get(args.control)
    if runner is not None:
        runner()
| 53.830827
| 182
| 0.665549
| 2,004
| 14,319
| 4.530439
| 0.114271
| 0.055843
| 0.023791
| 0.031722
| 0.82465
| 0.790395
| 0.768256
| 0.76319
| 0.739068
| 0.735433
| 0
| 0.011824
| 0.208534
| 14,319
| 265
| 183
| 54.033962
| 0.789288
| 0.072142
| 0
| 0.687243
| 0
| 0
| 0.082448
| 0.018087
| 0.00823
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.004115
| null | null | 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cdddba83a3a73145cb43c2aa0801ec55211bb8ad
| 7,674
|
py
|
Python
|
idfy_rest_client/controllers/language_sets_controller.py
|
dealflowteam/Idfy
|
fa3918a6c54ea0eedb9146578645b7eb1755b642
|
[
"MIT"
] | null | null | null |
idfy_rest_client/controllers/language_sets_controller.py
|
dealflowteam/Idfy
|
fa3918a6c54ea0eedb9146578645b7eb1755b642
|
[
"MIT"
] | null | null | null |
idfy_rest_client/controllers/language_sets_controller.py
|
dealflowteam/Idfy
|
fa3918a6c54ea0eedb9146578645b7eb1755b642
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
idfy_rest_client.controllers.language_sets_controller
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io ).
"""
from .base_controller import BaseController
from ..api_helper import APIHelper
from ..configuration import Configuration
from ..models.language_set_dto import LanguageSetDTO
class LanguageSetsController(BaseController):

    """A Controller to access the language-sets Endpoints in the
    idfy_rest_client API: create, list, update, delete and retrieve.

    The five public endpoints share their URL building, header
    construction and execute/validate/deserialize boilerplate, which is
    factored into private helpers below; the public interface is
    unchanged from the generated original.
    """

    # Resource paths shared by every endpoint below.
    _COLLECTION_PATH = '/text/language-sets'
    _ITEM_PATH = '/text/language-sets/{id}'

    @staticmethod
    def _json_headers(with_content_type=False):
        """Return a fresh JSON 'accept' headers dict.

        Args:
            with_content_type (bool): add the JSON content-type header for
                requests that carry a serialized body.

        Returns:
            dict: headers for the HTTP client (new dict per call, so a
            client may safely mutate it).
        """
        headers = {'accept': 'application/json'}
        if with_content_type:
            headers['content-type'] = 'application/json; charset=utf-8'
        return headers

    def _collection_url(self):
        """Build the cleaned URL of the language-sets collection."""
        return APIHelper.clean_url(Configuration.get_base_uri() + self._COLLECTION_PATH)

    def _item_url(self, id):
        """Validate *id* and build the cleaned URL of a single language set."""
        # Validate required parameters first, as the generated code did.
        self.validate_parameters(id=id)
        _query_builder = Configuration.get_base_uri() + self._ITEM_PATH
        _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, {
            'id': id
        })
        return APIHelper.clean_url(_query_builder)

    def _execute(self, request):
        """Execute *request*, validate the HTTP response, return the context.

        Raises:
            APIException: propagated from validate_response on HTTP errors.
        """
        _context = self.execute_request(request)
        self.validate_response(_context)
        return _context

    def _to_dto(self, context):
        """Deserialize a validated response body into LanguageSetDTO object(s)."""
        return APIHelper.json_deserialize(context.response.raw_body, LanguageSetDTO.from_dictionary)

    def create_language_set(self,
                            new_language_set=None):
        """Does a POST request to /text/language-sets.

        Creates a new language set.

        Args:
            new_language_set (LanguageSetCreateDTO, optional): The language
                set to create; serialized as the JSON request body.

        Returns:
            LanguageSetDTO: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        _request = self.http_client.post(self._collection_url(),
                                         headers=self._json_headers(with_content_type=True),
                                         parameters=APIHelper.json_serialize(new_language_set))
        return self._to_dto(self._execute(_request))

    def list_language_sets(self):
        """Does a GET request to /text/language-sets.

        Returns a list of all your language sets.

        Returns:
            list of LanguageSetDTO: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        _request = self.http_client.get(self._collection_url(),
                                        headers=self._json_headers())
        return self._to_dto(self._execute(_request))

    def update_language_set(self,
                            id,
                            language_set_update=None):
        """Does a PATCH request to /text/language-sets/{id}.

        Updates the specified language set with the parameters passed.

        Args:
            id (int): Identifier of the language set to update.
            language_set_update (LanguageSetUpdateDTO, optional): Fields to
                change; serialized as the JSON request body.

        Returns:
            LanguageSetDTO: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        _request = self.http_client.patch(self._item_url(id),
                                          headers=self._json_headers(with_content_type=True),
                                          parameters=APIHelper.json_serialize(language_set_update))
        return self._to_dto(self._execute(_request))

    def delete_language_set(self,
                            id):
        """Does a DELETE request to /text/language-sets/{id}.

        Deletes the specified language set.

        Args:
            id (int): Identifier of the language set to delete.

        Returns:
            void: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        # The generated endpoint sends no explicit headers on DELETE; keep that.
        _request = self.http_client.delete(self._item_url(id))
        self._execute(_request)

    def retrieve_language_set(self,
                              id):
        """Does a GET request to /text/language-sets/{id}.

        Retrieves the details of a single language set.

        Args:
            id (int): Identifier of the language set to retrieve.

        Returns:
            LanguageSetDTO: Response from the API. Success

        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.

        """
        _request = self.http_client.get(self._item_url(id),
                                        headers=self._json_headers())
        return self._to_dto(self._execute(_request))
| 34.881818
| 130
| 0.613761
| 809
| 7,674
| 5.593325
| 0.15822
| 0.055691
| 0.035359
| 0.055249
| 0.820773
| 0.815249
| 0.794254
| 0.794254
| 0.738785
| 0.725746
| 0
| 0.000956
| 0.318348
| 7,674
| 219
| 131
| 35.041096
| 0.86408
| 0.395491
| 0
| 0.733333
| 1
| 0
| 0.074454
| 0.018485
| 0
| 0
| 0
| 0.022831
| 0
| 1
| 0.066667
| false
| 0
| 0.053333
| 0
| 0.186667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a80c7fef5f5c203f07acaa4bb74d54a99210548f
| 70,756
|
py
|
Python
|
learning/tests/models/test_course.py
|
dbcaturra/django-koala-azure
|
7b79b7484e3530513b97ed148333ba0778f38a3e
|
[
"MIT"
] | null | null | null |
learning/tests/models/test_course.py
|
dbcaturra/django-koala-azure
|
7b79b7484e3530513b97ed148333ba0778f38a3e
|
[
"MIT"
] | null | null | null |
learning/tests/models/test_course.py
|
dbcaturra/django-koala-azure
|
7b79b7484e3530513b97ed148333ba0778f38a3e
|
[
"MIT"
] | null | null | null |
#
# Copyright (C) 2019 Guillaume Bernard <guillaume.bernard@koala-lms.org>
#
# This file is part of Koala LMS (Learning Management system)
# Koala LMS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# We make an extensive use of the Django framework, https://www.djangoproject.com/
#
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.test import TestCase
from learning.exc import RegistrationDisabledError, UserIsAlreadyCollaborator, \
UserIsAlreadyAuthor, UserNotCollaboratorError, UserIsNotStudent, UserIsAlreadyStudent, ChangeActivityOnCourseError, \
ActivityAlreadyOnCourseError, ActivityNotReusableError, \
ActivityIsNotLinkedWithThisCourseError
from learning.models import Course, CollaboratorRole, CourseAccess, CourseState, CourseCollaborator, Activity, \
CourseActivity, ActivityReuse, RegistrationOnCourse
class CourseTestCase(TestCase):
    """Shared fixtures: five users, one published course per access level
    (all authored by pk=1, registration enabled), four activities attached
    to the public course with ranks 10..40.
    """

    def setUp(self) -> None:
        # Users pk 1..5; pk=1 ("william-shakespeare") authors everything below.
        usernames = ("william-shakespeare", "emily-dickinson", "h-p-lovecraft",
                     "arthur-conan-doyle", "leo-tolstoy")
        for pk, username in enumerate(usernames, start=1):
            get_user_model().objects.create_user(id=pk, username=username)
        author = get_user_model().objects.get(pk=1)
        # One published, registration-enabled course per access level.
        course_specs = (
            ("private_course", 1, "A simple private course", CourseAccess.PRIVATE),
            ("public_course", 2, "A simple public course", CourseAccess.PUBLIC),
            ("students_only_course", 3, "A simple students only course", CourseAccess.STUDENTS_ONLY),
            ("collaborators_only_course", 4, "A simple collaborators only course", CourseAccess.COLLABORATORS_ONLY),
        )
        for attr, pk, course_name, access in course_specs:
            setattr(self, attr, Course.objects.create(
                id=pk,
                name=course_name,
                description="A simple description",
                author=author,
                tags="simple, course",
                access=access.name,
                state=CourseState.PUBLISHED.name,
                registration_enabled=True,
                language="en",
            ))
        # Four identical activities (pk 1..4), exposed as self.activity1..4.
        for pk in range(1, 5):
            setattr(self, f"activity{pk}", Activity.objects.create(
                id=pk,
                name="An activity",
                description="An activity description",
                author=author,
                language="en",
            ))
        # Attach them to the public course with ranks 10, 20, 30, 40,
        # exposed as self.ca1..4.
        activities = (self.activity1, self.activity2, self.activity3, self.activity4)
        for pk, activity in enumerate(activities, start=1):
            setattr(self, f"ca{pk}", CourseActivity.objects.create(
                id=pk,
                rank=10 * pk,
                course=self.public_course,
                activity=activity,
            ))
class CourseUserPermsTest(CourseTestCase):
def test_no_perm_for_collaborators_on_private_course(self):
user = get_user_model().objects.get(pk=2)
CourseCollaborator.objects.create(course=self.private_course, collaborator=user,
role=CollaboratorRole.OWNER.name)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(sorted([]), sorted(self.private_course.get_user_perms(user)))
user = get_user_model().objects.get(pk=3)
CourseCollaborator.objects.create(course=self.private_course, collaborator=user,
role=CollaboratorRole.NON_EDITOR_TEACHER.name)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(sorted([]), sorted(self.private_course.get_user_perms(user)))
user = get_user_model().objects.get(pk=4)
CourseCollaborator.objects.create(course=self.private_course, collaborator=user,
role=CollaboratorRole.TEACHER.name)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(sorted([]), sorted(self.private_course.get_user_perms(user)))
def test_no_perm_for_student_on_private_course(self):
user = get_user_model().objects.get(pk=4)
self.private_course.students.add(user)
self.assertIn(user, self.private_course.students.all())
self.assertEqual(sorted(self.private_course.get_user_perms(user)), [])
def test_perms_for_collaborator_as_owner_on_public_course(self):
user = get_user_model().objects.get(pk=2)
CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
role=CollaboratorRole.OWNER.name)
self.assertIn(user, self.public_course.collaborators.all())
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course", "delete_course",
"view_collaborators_course", "view_students_course", "change_privacy_course",
"add_collaborator_course", "add_student_course",
"delete_collaborator_course", "delete_student_course",
'change_collaborator_course', "change_student_course",
'add_objective_course', 'delete_objective_course', 'view_objective_course','change_objective_course',
]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_author_on_public_course(self):
user = get_user_model().objects.get(pk=1)
self.assertEqual(user, self.public_course.author)
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course", "delete_course",
"view_collaborators_course", "view_students_course", "change_privacy_course",
"add_collaborator_course", "add_student_course",
"delete_collaborator_course", "delete_student_course",
'change_collaborator_course', "change_student_course",
'add_objective_course', 'delete_objective_course', 'change_objective_course', 'view_objective_course',
]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_author_on_private_course(self):
user = get_user_model().objects.get(pk=1)
self.assertEqual(user, self.public_course.author)
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course", "delete_course",
"view_collaborators_course", "view_students_course", "change_privacy_course",
"add_collaborator_course", "add_student_course",
"delete_collaborator_course", "delete_student_course",
'change_collaborator_course', "change_student_course",
'add_objective_course', 'delete_objective_course', 'view_objective_course', 'change_objective_course',
]
self.assertEqual(sorted(self.private_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_author_on_students_only_course(self):
user = get_user_model().objects.get(pk=1)
self.assertEqual(user, self.public_course.author)
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course", "delete_course",
"view_collaborators_course", "view_students_course", "change_privacy_course",
"add_collaborator_course", "add_student_course",
"delete_collaborator_course", "delete_student_course",
'change_collaborator_course', "change_student_course",
'add_objective_course', 'delete_objective_course', 'change_objective_course', 'view_objective_course',
]
self.assertEqual(sorted(self.students_only_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_author_on_collaborators_only_course(self):
user = get_user_model().objects.get(pk=1)
self.assertEqual(user, self.public_course.author)
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course", "delete_course",
"view_collaborators_course", "view_students_course", "change_privacy_course",
"add_collaborator_course", "add_student_course",
"delete_collaborator_course", "delete_student_course",
'change_collaborator_course', "change_student_course",
'add_objective_course', 'change_objective_course', 'delete_objective_course', 'view_objective_course'
,
]
self.assertEqual(sorted(self.collaborators_only_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_collaborator_as_non_editor_teacher_on_public_course(self):
user = get_user_model().objects.get(pk=3)
CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
role=CollaboratorRole.NON_EDITOR_TEACHER.name)
self.assertIn(user, self.public_course.collaborators.all())
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course",
"view_collaborators_course", "view_students_course", 'view_objective_course',
]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_collaborator_as_teacher_on_public_course(self):
user = get_user_model().objects.get(pk=4)
CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
role=CollaboratorRole.TEACHER.name)
self.assertIn(user, self.public_course.collaborators.all())
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course",
"view_collaborators_course", "view_students_course",
"add_student_course", "change_student_course", "delete_student_course",
'add_objective_course', 'delete_objective_course', 'view_objective_course','change_objective_course',
]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_collaborator_as_owner_on_students_only_course(self):
user = get_user_model().objects.get(pk=2)
CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
role=CollaboratorRole.OWNER.name)
self.assertIn(user, self.public_course.collaborators.all())
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course", "delete_course",
"view_collaborators_course", "view_students_course", "change_privacy_course",
"add_collaborator_course", "add_student_course",
"delete_collaborator_course", "delete_student_course",
'change_collaborator_course', "change_student_course",
'view_objective_course', 'add_objective_course', 'delete_objective_course','change_objective_course',
]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_collaborator_as_non_editor_teacher_on_students_only_course(self):
user = get_user_model().objects.get(pk=3)
CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
role=CollaboratorRole.NON_EDITOR_TEACHER.name)
self.assertIn(user, self.public_course.collaborators.all())
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course",
"view_collaborators_course", "view_students_course", 'view_objective_course',
]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_collaborator_as_teacher_on_students_only_course(self):
user = get_user_model().objects.get(pk=4)
CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
role=CollaboratorRole.TEACHER.name)
self.assertIn(user, self.public_course.collaborators.all())
expected_perms = [
"view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course",
"view_collaborators_course", "view_students_course",
"add_student_course", "change_student_course", "delete_student_course",
'add_objective_course', 'delete_objective_course', 'view_objective_course','change_objective_course',
]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
    def test_perms_for_collaborator_as_owner_on_collaborators_only_course(self):
        """An OWNER collaborator gets the full permission set, including delete and privacy.

        NOTE(review): despite the method name, the body operates on
        ``self.public_course`` — confirm whether ``collaborators_only_course`` was intended.
        """
        user = get_user_model().objects.get(pk=2)
        CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
                                          role=CollaboratorRole.OWNER.name)
        self.assertIn(user, self.public_course.collaborators.all())
        expected_perms = [
            "view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course", "delete_course",
            "view_collaborators_course", "view_students_course", "change_privacy_course",
            "add_collaborator_course", "add_student_course", "delete_student_course", "delete_collaborator_course",
            'change_collaborator_course', "change_student_course", 'add_objective_course',
            'delete_objective_course', 'view_objective_course', 'change_objective_course',
        ]
        self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
    def test_perms_for_collaborator_as_non_editor_teacher_on_collaborators_only_course(self):
        """A NON_EDITOR_TEACHER collaborator gets only the view_* permissions.

        NOTE(review): despite the method name, the body operates on
        ``self.public_course`` — confirm whether ``collaborators_only_course`` was intended.
        """
        user = get_user_model().objects.get(pk=3)
        CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
                                          role=CollaboratorRole.NON_EDITOR_TEACHER.name)
        self.assertIn(user, self.public_course.collaborators.all())
        expected_perms = [
            "view_course", "view_hidden_course", "view_similar_course",
            "view_collaborators_course", "view_students_course", 'view_objective_course',
        ]
        self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
    def test_perms_for_collaborator_as_teacher_on_collaborators_only_course(self):
        """A TEACHER collaborator can edit the course and manage students/objectives.

        NOTE(review): despite the method name, the body operates on
        ``self.public_course`` — confirm whether ``collaborators_only_course`` was intended.
        """
        user = get_user_model().objects.get(pk=4)
        CourseCollaborator.objects.create(course=self.public_course, collaborator=user,
                                          role=CollaboratorRole.TEACHER.name)
        self.assertIn(user, self.public_course.collaborators.all())
        expected_perms = [
            "view_course", "view_hidden_course", "view_similar_course", "add_course", "change_course",
            "view_collaborators_course", "view_students_course",
            "add_student_course", "change_student_course", "delete_student_course",
            'add_objective_course', 'delete_objective_course', 'view_objective_course','change_objective_course',
        ]
        self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_student_on_public_course(self):
user = get_user_model().objects.get(pk=4)
expected_perms = ["view_course"]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
self.public_course.students.add(user)
expected_perms = ["view_course", "view_similar_course"]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
    def test_perms_for_student_on_students_only_course(self):
        """On a students-only course, view perms appear only after registration."""
        user = get_user_model().objects.get(pk=4)
        expected_perms = []
        self.assertEqual(sorted(self.students_only_course.get_user_perms(user)), sorted(expected_perms))
        self.students_only_course.students.add(user)
        expected_perms = ["view_course", "view_similar_course"]
        self.assertEqual(sorted(self.students_only_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_student_on_collaborators_only_course(self):
user = get_user_model().objects.get(pk=4)
expected_perms = []
self.assertEqual(sorted(self.students_only_course.get_user_perms(user)), sorted(expected_perms))
self.collaborators_only_course.students.add(user)
self.assertEqual(sorted(self.students_only_course.get_user_perms(user)), sorted(expected_perms))
    def test_perms_for_student_on_private_course(self):
        """A student gains no permissions on a private course, even after registration."""
        user = get_user_model().objects.get(pk=4)
        expected_perms = []
        self.assertEqual(sorted(self.private_course.get_user_perms(user)), sorted(expected_perms))
        self.private_course.students.add(user)
        self.assertEqual(sorted(self.private_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_anonymous_on_public_course(self):
user = AnonymousUser()
expected_perms = ["view_course"]
self.assertEqual(sorted(self.public_course.get_user_perms(user)), sorted(expected_perms))
    def test_perms_for_anonymous_on_students_only_course(self):
        """Anonymous visitors have no permissions on a students-only course."""
        user = AnonymousUser()
        expected_perms = []
        self.assertEqual(sorted(self.students_only_course.get_user_perms(user)), sorted(expected_perms))
    def test_perms_for_anonymous_on_collaborators_only_course(self):
        """Anonymous visitors have no permissions on a collaborators-only course."""
        user = AnonymousUser()
        expected_perms = []
        self.assertEqual(sorted(self.collaborators_only_course.get_user_perms(user)), sorted(expected_perms))
def test_perms_for_anonymous_on_private_course(self):
user = AnonymousUser()
expected_perms = []
self.assertEqual(sorted(self.collaborators_only_course.get_user_perms(user)), sorted(expected_perms))
class CourseTest(CourseTestCase):
"""
Default values
"""
    def test_default_values_for_attributes(self):
        """A new course defaults to DRAFT state, PUBLIC access, and a slugified name."""
        course = Course.objects.create(author=get_user_model().objects.get(pk=1),
                                       name="A sample name to test the /slug generator")
        self.assertEqual(course.state, CourseState.DRAFT.name)
        self.assertEqual(course.access, CourseAccess.PUBLIC.name)
        # The "/" in the name is dropped by the slug generator.
        self.assertEqual(course.slug, "a-sample-name-to-test-the-slug-generator")
"""
Property object_collaborators
"""
def test_object_collaborators(self):
CourseCollaborator.objects.create(
collaborator=get_user_model().objects.get(pk=1), role=CollaboratorRole.TEACHER.name,
course=self.public_course
)
self.assertEqual(1, self.public_course.object_collaborators.count())
self.assertEqual(get_user_model().objects.get(pk=1),
self.public_course.object_collaborators.first().collaborator)
self.assertEqual(CollaboratorRole.TEACHER.name, self.public_course.object_collaborators.first().role)
self.assertEqual(self.public_course, self.public_course.object_collaborators.first().course)
"""
Method: can_register
"""
    def test_can_register_on_public_course(self):
        """can_register is True only for a PUBLISHED course with registration enabled."""
        # Draft: can_register stays False regardless of the registration flag
        self.public_course.registration_enabled = False
        self.assertFalse(self.public_course.can_register)
        self.public_course.state = CourseState.DRAFT.name
        self.assertFalse(self.public_course.can_register)
        self.public_course.registration_enabled = True
        self.assertFalse(self.public_course.can_register)
        # Archived: can_register stays False regardless of the registration flag
        self.public_course.registration_enabled = False
        self.assertFalse(self.public_course.can_register)
        self.public_course.state = CourseState.ARCHIVED.name
        self.assertFalse(self.public_course.can_register)
        self.public_course.registration_enabled = True
        self.assertFalse(self.public_course.can_register)
        # Published: can_register becomes True only once registration is enabled
        self.public_course.registration_enabled = False
        self.assertFalse(self.public_course.can_register)
        self.public_course.state = CourseState.PUBLISHED.name
        self.assertFalse(self.public_course.can_register)
        self.public_course.registration_enabled = True
        self.assertTrue(self.public_course.can_register)
    def test_can_register_on_collaborators_only_course(self):
        """can_register follows state/flag rules regardless of the course access level."""
        # Draft: can_register stays False regardless of the registration flag
        self.collaborators_only_course.registration_enabled = False
        self.assertFalse(self.collaborators_only_course.can_register)
        self.collaborators_only_course.state = CourseState.DRAFT.name
        self.assertFalse(self.collaborators_only_course.can_register)
        self.collaborators_only_course.registration_enabled = True
        self.assertFalse(self.collaborators_only_course.can_register)
        # Archived: can_register stays False regardless of the registration flag
        self.collaborators_only_course.registration_enabled = False
        self.assertFalse(self.collaborators_only_course.can_register)
        self.collaborators_only_course.state = CourseState.ARCHIVED.name
        self.assertFalse(self.collaborators_only_course.can_register)
        self.collaborators_only_course.registration_enabled = True
        self.assertFalse(self.collaborators_only_course.can_register)
        # Published: can_register becomes True only once registration is enabled
        self.collaborators_only_course.registration_enabled = False
        self.assertFalse(self.collaborators_only_course.can_register)
        self.collaborators_only_course.state = CourseState.PUBLISHED.name
        self.assertFalse(self.collaborators_only_course.can_register)
        self.collaborators_only_course.registration_enabled = True
        self.assertTrue(self.collaborators_only_course.can_register)
    def test_can_register_on_students_only_course(self):
        """can_register follows state/flag rules regardless of the course access level."""
        # Draft: can_register stays False regardless of the registration flag
        self.students_only_course.registration_enabled = False
        self.assertFalse(self.students_only_course.can_register)
        self.students_only_course.state = CourseState.DRAFT.name
        self.assertFalse(self.students_only_course.can_register)
        self.students_only_course.registration_enabled = True
        self.assertFalse(self.students_only_course.can_register)
        # Archived: can_register stays False regardless of the registration flag
        self.students_only_course.registration_enabled = False
        self.assertFalse(self.students_only_course.can_register)
        self.students_only_course.state = CourseState.ARCHIVED.name
        self.assertFalse(self.students_only_course.can_register)
        self.students_only_course.registration_enabled = True
        self.assertFalse(self.students_only_course.can_register)
        # Published: can_register becomes True only once registration is enabled
        self.students_only_course.registration_enabled = False
        self.assertFalse(self.students_only_course.can_register)
        self.students_only_course.state = CourseState.PUBLISHED.name
        self.assertFalse(self.students_only_course.can_register)
        self.students_only_course.registration_enabled = True
        self.assertTrue(self.students_only_course.can_register)
    def test_can_register_on_private_course(self):
        """can_register follows state/flag rules even on a private course."""
        # Draft: can_register stays False regardless of the registration flag
        self.private_course.registration_enabled = False
        self.assertFalse(self.private_course.can_register)
        self.private_course.state = CourseState.DRAFT.name
        self.assertFalse(self.private_course.can_register)
        self.private_course.registration_enabled = True
        self.assertFalse(self.private_course.can_register)
        # Archived: can_register stays False regardless of the registration flag
        self.private_course.registration_enabled = False
        self.assertFalse(self.private_course.can_register)
        self.private_course.state = CourseState.ARCHIVED.name
        self.assertFalse(self.private_course.can_register)
        self.private_course.registration_enabled = True
        self.assertFalse(self.private_course.can_register)
        # Published: can_register becomes True only once registration is enabled
        self.private_course.registration_enabled = False
        self.assertFalse(self.private_course.can_register)
        self.private_course.state = CourseState.PUBLISHED.name
        self.assertFalse(self.private_course.can_register)
        self.private_course.registration_enabled = True
        self.assertTrue(self.private_course.can_register)
"""
Method register
"""
    def test_student_cannot_register_because_is_already_student(self):
        """register raises UserIsAlreadyStudent when the user is already registered."""
        user = get_user_model().objects.get(pk=2)
        # Add the user in students
        self.public_course.students.add(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
        # Test student self-registration
        with self.assertRaises(UserIsAlreadyStudent):
            self.public_course.register(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
    def test_student_cannot_register_because_is_already_author(self):
        """register raises UserIsAlreadyAuthor when the user authored the course."""
        user = get_user_model().objects.get(pk=1)
        # Set the user as the author of the course
        self.public_course.author = user
        # Test student self-registration
        with self.assertRaises(UserIsAlreadyAuthor):
            self.public_course.register(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
    def test_student_cannot_register_because_is_already_a_collaborator(self):
        """register raises UserIsAlreadyCollaborator for an existing collaborator."""
        user = get_user_model().objects.get(pk=2)
        # Add the user in the collaborators of the course
        # NOTE(review): role is passed as the enum member here, while other
        # tests pass CollaboratorRole.TEACHER.name — confirm both are accepted.
        self.public_course.course_collaborators.add(
            CourseCollaborator.objects.create(
                collaborator=user, course=self.public_course, role=CollaboratorRole.TEACHER
            )
        )
        # Test student self-registration
        with self.assertRaises(UserIsAlreadyCollaborator):
            self.public_course.register(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
    def test_student_cannot_register_because_registration_is_disabled(self):
        """register raises RegistrationDisabledError when self-registration is off."""
        user = get_user_model().objects.get(pk=2)
        # Set registration as disabled but published
        self.public_course.state = CourseState.PUBLISHED.name
        self.public_course.registration_enabled = False
        self.assertFalse(self.public_course.can_register)
        # Test student self-registration
        with self.assertRaises(RegistrationDisabledError):
            self.public_course.register(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
    def test_student_cannot_register_because_course_is_a_draft(self):
        """register raises RegistrationDisabledError on a draft course."""
        user = get_user_model().objects.get(pk=2)
        self.public_course.state = CourseState.DRAFT.name
        self.public_course.registration_enabled = True
        self.assertFalse(self.public_course.can_register)
        with self.assertRaises(RegistrationDisabledError):
            self.public_course.register(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
def test_student_cannot_register_because_course_is_archived(self):
user = get_user_model().objects.get(pk=2)
self.public_course.state = CourseState.ARCHIVED.name
self.public_course.registration_enabled = True
self.assertFalse(self.public_course.can_register)
self.public_course.state = CourseState.ARCHIVED.name
with self.assertRaises(RegistrationDisabledError):
self.public_course.register(user)
self.assertNotIn(user, self.public_course.students.all())
self.assertEqual(0, self.public_course.students.count())
    def test_student_can_register_on_course(self):
        """register adds the user as a student and marks the registration as self-made."""
        user = get_user_model().objects.get(pk=2)
        self.public_course.register(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
        user = get_user_model().objects.get(pk=3)
        self.public_course.register(user)
        # self_registration must be True for the self-service register() path
        self.assertTrue(self.public_course.registrations.get(student=user).self_registration)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(2, self.public_course.students.count())
"""
Method register_student
"""
    def test_cannot_register_because_is_already_student(self):
        """register_student raises UserIsAlreadyStudent for a registered user."""
        user = get_user_model().objects.get(pk=2)
        # Add the user in students
        self.public_course.students.add(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
        # Test registration by a collaborator/admin
        with self.assertRaises(UserIsAlreadyStudent):
            self.public_course.register_student(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
    def test_cannot_register_because_is_already_author(self):
        """register_student raises UserIsAlreadyAuthor for the course author."""
        user = get_user_model().objects.get(pk=1)
        # Set the user as the author of the course
        self.public_course.author = user
        # Test registration by a collaborator/admin
        with self.assertRaises(UserIsAlreadyAuthor):
            self.public_course.register_student(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
    def test_cannot_register_because_is_already_a_collaborator(self):
        """register_student raises UserIsAlreadyCollaborator for a collaborator."""
        user = get_user_model().objects.get(pk=2)
        # Add the user in the collaborators of the course
        self.public_course.course_collaborators.add(
            CourseCollaborator.objects.create(
                collaborator=user, course=self.public_course, role=CollaboratorRole.TEACHER
            )
        )
        # Test registration by a collaborator/admin
        with self.assertRaises(UserIsAlreadyCollaborator):
            self.public_course.register_student(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
    def test_can_register_even_if_registration_is_disabled(self):
        """register_student (admin path) works even when self-registration is off."""
        user = get_user_model().objects.get(pk=2)
        # Set registration as disabled but published
        self.public_course.state = CourseState.PUBLISHED.name
        self.public_course.registration_enabled = False
        self.assertFalse(self.public_course.can_register)
        # Test student registration
        self.public_course.register_student(user)
        self.assertFalse(self.public_course.registrations.get(student=user).self_registration)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
    def test_can_register_even_if_registration_is_a_draft(self):
        """register_student works even when the course is a draft.

        NOTE(review): the name says "registration is a draft" but it is the
        course state that is DRAFT here.
        """
        user = get_user_model().objects.get(pk=2)
        self.public_course.state = CourseState.DRAFT.name
        self.public_course.registration_enabled = True
        self.assertFalse(self.public_course.can_register)
        self.public_course.register_student(user)
        self.assertFalse(self.public_course.registrations.get(student=user).self_registration)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
def test_can_register_even_if_registration_is_archived(self):
user = get_user_model().objects.get(pk=2)
self.public_course.state = CourseState.ARCHIVED.name
self.public_course.registration_enabled = True
self.assertFalse(self.public_course.can_register)
self.public_course.state = CourseState.ARCHIVED.name
self.public_course.register_student(user)
self.assertFalse(self.public_course.registrations.get(student=user).self_registration)
self.assertIn(user, self.public_course.students.all())
self.assertEqual(1, self.public_course.students.count())
    def test_can_register_on_course(self):
        """register_student adds students and records self_registration=False."""
        user = get_user_model().objects.get(pk=2)
        self.public_course.register_student(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
        user = get_user_model().objects.get(pk=3)
        self.public_course.register_student(user)
        self.assertFalse(self.public_course.registrations.get(student=user).self_registration)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(2, self.public_course.students.count())
"""
Method unsubscribe
"""
    def test_student_cannot_unsubscribe_because_registration_is_disabled(self):
        """unsubscribe raises RegistrationDisabledError when registration is off."""
        user = get_user_model().objects.get(pk=2)
        # Set registration as disabled but published
        self.public_course.state = CourseState.PUBLISHED.name
        self.public_course.registration_enabled = False
        self.assertFalse(self.public_course.can_register)
        self.public_course.students.add(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
        # Test student self-unsubscription
        with self.assertRaises(RegistrationDisabledError):
            self.public_course.unsubscribe(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
    def test_student_cannot_unsubscribe_because_course_is_a_draft(self):
        """unsubscribe raises RegistrationDisabledError on a draft course."""
        user = get_user_model().objects.get(pk=2)
        self.public_course.state = CourseState.DRAFT.name
        self.public_course.registration_enabled = True
        self.assertFalse(self.public_course.can_register)
        self.public_course.students.add(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
        with self.assertRaises(RegistrationDisabledError):
            self.public_course.unsubscribe(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
def test_student_cannot_unsubscribe_because_course_is_archived(self):
user = get_user_model().objects.get(pk=2)
self.public_course.state = CourseState.ARCHIVED.name
self.public_course.registration_enabled = True
self.assertFalse(self.public_course.can_register)
self.public_course.students.add(user)
self.assertIn(user, self.public_course.students.all())
self.assertEqual(1, self.public_course.students.count())
self.public_course.state = CourseState.ARCHIVED.name
with self.assertRaises(RegistrationDisabledError):
self.public_course.unsubscribe(user)
self.assertIn(user, self.public_course.students.all())
self.assertEqual(1, self.public_course.students.count())
    def test_student_cannot_unsubscribe_because_not_a_student(self):
        """unsubscribe raises UserIsNotStudent when the user is not registered."""
        user = get_user_model().objects.get(pk=2)
        self.assertNotIn(user, self.public_course.students.all())
        with self.assertRaises(UserIsNotStudent):
            self.public_course.unsubscribe(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
def test_student_can_unsubscribe(self):
user = get_user_model().objects.get(pk=2)
self.assertNotIn(user, self.public_course.students.all())
self.public_course.students.add(user)
self.assertIn(user, self.public_course.students.all())
self.public_course.unsubscribe(user)
self.assertNotIn(user, self.public_course.students.all())
"""
Method unsubscribe student
"""
    def test_student_cannot_unsubscribe_because_registration_even_if_is_disabled(self):
        """unsubscribe_student (admin path) succeeds even when registration is off.

        NOTE(review): the method name reads "cannot unsubscribe" but the body
        asserts the removal succeeds — the name looks like a copy/paste slip.
        """
        user = get_user_model().objects.get(pk=2)
        # Set registration as disabled but published
        self.public_course.state = CourseState.PUBLISHED.name
        self.public_course.registration_enabled = False
        self.assertFalse(self.public_course.can_register)
        self.public_course.students.add(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
        self.public_course.unsubscribe_student(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
    def test_student_cannot_unsubscribe_because_course_even_if_is_a_draft(self):
        """unsubscribe_student succeeds even on a draft course.

        NOTE(review): the method name reads "cannot unsubscribe" but the body
        asserts the removal succeeds — the name looks like a copy/paste slip.
        """
        user = get_user_model().objects.get(pk=2)
        self.public_course.state = CourseState.DRAFT.name
        self.public_course.registration_enabled = True
        self.assertFalse(self.public_course.can_register)
        self.public_course.students.add(user)
        self.assertIn(user, self.public_course.students.all())
        self.assertEqual(1, self.public_course.students.count())
        self.public_course.unsubscribe_student(user)
        self.assertNotIn(user, self.public_course.students.all())
        self.assertEqual(0, self.public_course.students.count())
def test_student_cannot_unsubscribe_because_course_even_if_is_archived(self):
user = get_user_model().objects.get(pk=2)
self.public_course.state = CourseState.ARCHIVED.name
self.public_course.registration_enabled = True
self.assertFalse(self.public_course.can_register)
self.public_course.students.add(user)
self.assertIn(user, self.public_course.students.all())
self.assertEqual(1, self.public_course.students.count())
self.public_course.state = CourseState.ARCHIVED.name
self.public_course.unsubscribe_student(user)
self.assertNotIn(user, self.public_course.students.all())
self.assertEqual(0, self.public_course.students.count())
    def test_cannot_unsubscribe_because_not_a_student(self):
        """unsubscribe_student raises UserIsNotStudent for a non-registered user."""
        user = get_user_model().objects.get(pk=2)
        self.assertNotIn(user, self.public_course.students.all())
        with self.assertRaises(UserIsNotStudent):
            self.public_course.unsubscribe_student(user)
        self.assertNotIn(user, self.public_course.students.all())
def test_can_unsubscribe(self):
user = get_user_model().objects.get(pk=2)
self.assertNotIn(user, self.public_course.students.all())
self.public_course.students.add(user)
self.assertIn(user, self.public_course.students.all())
self.public_course.unsubscribe_student(user)
self.assertNotIn(user, self.public_course.students.all())
"""
Method add_collaborator
"""
    def test_cannot_add_collaborator_because_is_already_author(self):
        """add_collaborator raises UserIsAlreadyAuthor for the course author."""
        user = self.private_course.author
        with self.assertRaises(UserIsAlreadyAuthor):
            self.private_course.add_collaborator(user, CollaboratorRole.OWNER)
        self.assertNotIn(user, self.private_course.collaborators.all())
        self.assertEqual(0, self.private_course.collaborators.count())
def test_cannot_add_collaborator_if_already_collaborator(self):
user = get_user_model().objects.get(pk=2)
ca = CourseCollaborator.objects.create(collaborator=user, course=self.public_course,
role=CollaboratorRole.TEACHER.name)
self.private_course.course_collaborators.add(ca)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(1, self.private_course.collaborators.count())
with self.assertRaises(UserIsAlreadyCollaborator):
self.private_course.add_collaborator(user, CollaboratorRole.OWNER)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(1, self.private_course.collaborators.count())
    def test_cannot_add_collaborator_if_already_student(self):
        """add_collaborator raises UserIsAlreadyStudent for a registered student."""
        user = get_user_model().objects.get(pk=2)
        cr = RegistrationOnCourse.objects.create(course=self.private_course, student=user)
        self.private_course.registrations.add(cr)
        self.assertIn(user, self.private_course.students.all())
        self.assertNotIn(user, self.private_course.collaborators.all())
        self.assertEqual(1, self.private_course.students.count())
        self.assertEqual(0, self.private_course.collaborators.count())
        with self.assertRaises(UserIsAlreadyStudent):
            self.private_course.add_collaborator(user, CollaboratorRole.OWNER)
        self.assertIn(user, self.private_course.students.all())
        self.assertNotIn(user, self.private_course.collaborators.all())
        self.assertEqual(1, self.private_course.students.count())
        self.assertEqual(0, self.private_course.collaborators.count())
def test_can_add_collaborator(self):
user = get_user_model().objects.get(pk=2)
self.private_course.add_collaborator(user, CollaboratorRole.TEACHER)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(1, self.private_course.collaborators.count())
"""
Method change_collaborator_role
"""
    def test_cannot_change_collaborator_role_because_is_not_already_one(self):
        """change_collaborator_role raises UserNotCollaboratorError for a non-collaborator."""
        user = get_user_model().objects.get(pk=2)
        self.assertNotIn(user, self.private_course.collaborators.all())
        self.assertEqual(0, self.private_course.collaborators.count())
        with self.assertRaises(UserNotCollaboratorError):
            self.private_course.change_collaborator_role(user, CollaboratorRole.NON_EDITOR_TEACHER)
        self.assertNotIn(user, self.private_course.collaborators.all())
        self.assertEqual(0, self.private_course.collaborators.count())
def test_change_collaborator_role(self):
user = get_user_model().objects.get(pk=3)
ca = CourseCollaborator.objects.create(collaborator=user, course=self.public_course,
role=CollaboratorRole.TEACHER.name)
self.private_course.course_collaborators.add(ca)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(1, self.private_course.collaborators.count())
for c in self.private_course.course_collaborators.all():
if c.collaborator == user:
self.assertEqual(CollaboratorRole.TEACHER.name, c.role)
self.private_course.change_collaborator_role(user, CollaboratorRole.OWNER)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(1, self.private_course.collaborators.count())
for c in self.private_course.course_collaborators.all():
if c.collaborator == user:
self.assertEqual(CollaboratorRole.OWNER.name, c.role)
"""
Method remove_collaborator
"""
    def test_cannot_remove_collaborator_because_is_not_already_one(self):
        """remove_collaborator raises UserNotCollaboratorError for a non-collaborator."""
        user = get_user_model().objects.get(pk=2)
        self.assertNotIn(user, self.private_course.collaborators.all())
        self.assertEqual(0, self.private_course.collaborators.count())
        with self.assertRaises(UserNotCollaboratorError):
            self.private_course.remove_collaborator(user)
        self.assertNotIn(user, self.private_course.collaborators.all())
        self.assertEqual(0, self.private_course.collaborators.count())
def test_remove_collaborator_from_course(self):
user = get_user_model().objects.get(pk=3)
ca = CourseCollaborator.objects.create(collaborator=user, course=self.public_course,
role=CollaboratorRole.TEACHER.name)
self.private_course.course_collaborators.add(ca)
self.assertIn(user, self.private_course.collaborators.all())
self.assertEqual(1, self.private_course.collaborators.count())
self.private_course.remove_collaborator(user)
self.assertEqual(0, self.private_course.collaborators.count())
self.assertNotIn(user, self.private_course.collaborators.all())
"""
Method add_activity
"""
    def test_cannot_add_activity_because_course_is_read_only(self):
        """add_activity raises ChangeActivityOnCourseError on an archived course."""
        activity = Activity.objects.create(
            name="An activity",
            description="An activity description",
            author=get_user_model().objects.get(pk=1)
        )
        # Archiving makes the course read-only
        self.students_only_course.state = CourseState.ARCHIVED.name
        self.assertTrue(self.students_only_course.read_only)
        self.assertNotIn(activity, self.students_only_course.activities)
        self.assertEqual(0, self.students_only_course.course_activities.count())
        with self.assertRaises(ChangeActivityOnCourseError):
            self.students_only_course.add_activity(activity)
        self.assertNotIn(activity, self.students_only_course.activities)
        self.assertEqual(0, self.students_only_course.course_activities.count())
    def test_cannot_add_activity_because_activity_is_already_linked(self):
        """add_activity raises ActivityAlreadyOnCourseError for a linked activity."""
        activity = Activity.objects.create(
            name="An activity",
            description="An activity description",
            author=get_user_model().objects.get(pk=1)
        )
        CourseActivity.objects.create(
            course=self.students_only_course, activity=activity, rank=5
        )
        self.assertIn(activity, self.students_only_course.activities)
        self.assertEqual(1, self.students_only_course.course_activities.count())
        with self.assertRaises(ActivityAlreadyOnCourseError):
            self.students_only_course.add_activity(activity)
        self.assertIn(activity, self.students_only_course.activities)
        self.assertEqual(1, self.students_only_course.course_activities.count())
def test_cannot_add_activity_because_activity_cannot_be_reused(self):
activity = Activity.objects.create(
name="An activity",
description="An activity description",
author=get_user_model().objects.get(pk=1),
reuse=ActivityReuse.NON_REUSABLE.name
)
self.assertNotIn(activity, self.students_only_course.activities)
self.assertEqual(0, self.students_only_course.course_activities.count())
with self.assertRaises(ActivityNotReusableError):
self.assertFalse(self.students_only_course.add_activity(activity))
self.assertNotIn(activity, self.students_only_course.activities)
self.assertEqual(0, self.students_only_course.course_activities.count())
    def test_add_activity(self):
        """add_activity appends activities, assigning ranks in insertion order."""
        # Explicit ids (99, 98, 97) are used below to look the ranks up again.
        activity = Activity.objects.create(
            id=99,
            name="An activity",
            description="An activity description",
            author=get_user_model().objects.get(pk=1),
        )
        self.assertNotIn(activity, self.students_only_course.activities)
        self.assertEqual(0, self.students_only_course.course_activities.count())
        self.students_only_course.add_activity(activity)
        self.assertIn(activity, self.students_only_course.activities)
        self.assertEqual(1, self.students_only_course.course_activities.count())
        activity = Activity.objects.create(
            id=98,
            name="An activity",
            description="An activity description",
            author=get_user_model().objects.get(pk=1),
        )
        self.students_only_course.add_activity(activity)
        self.assertIn(activity, self.students_only_course.activities)
        self.assertEqual(2, self.students_only_course.course_activities.count())
        activity = Activity.objects.create(
            id=97,
            name="An activity",
            description="An activity description",
            author=get_user_model().objects.get(pk=1),
        )
        self.students_only_course.add_activity(activity)
        self.assertIn(activity, self.students_only_course.activities)
        self.assertEqual(3, self.students_only_course.course_activities.count())
        # Ranks follow insertion order, not id order
        self.assertEqual(1, self.students_only_course.course_activities.filter(activity_id=99).get().rank)
        self.assertEqual(2, self.students_only_course.course_activities.filter(activity_id=98).get().rank)
        self.assertEqual(3, self.students_only_course.course_activities.filter(activity_id=97).get().rank)
"""
Method remove_activity
"""
    def test_cannot_remove_activity_because_course_is_read_only(self):
        """remove_activity raises ChangeActivityOnCourseError on an archived course."""
        activity = Activity.objects.create(
            name="An activity",
            description="An activity description",
            author=get_user_model().objects.get(pk=1)
        )
        CourseActivity.objects.create(
            course=self.students_only_course, activity=activity, rank=5
        )
        # Archiving makes the course read-only
        self.students_only_course.state = CourseState.ARCHIVED.name
        self.assertTrue(self.students_only_course.read_only)
        self.assertIn(activity, self.students_only_course.activities)
        self.assertEqual(1, self.students_only_course.course_activities.count())
        with self.assertRaises(ChangeActivityOnCourseError):
            self.students_only_course.remove_activity(activity)
        self.assertIn(activity, self.students_only_course.activities)
        self.assertEqual(1, self.students_only_course.course_activities.count())
    def test_cannot_remove_activity_because_activity_is_not_linked_with_the_course(self):
        """remove_activity raises ActivityIsNotLinkedWithThisCourseError when unlinked."""
        activity = Activity.objects.create(
            name="An activity",
            description="An activity description",
            author=get_user_model().objects.get(pk=1)
        )
        self.assertNotIn(activity, self.students_only_course.activities)
        self.assertEqual(0, self.students_only_course.course_activities.count())
        with self.assertRaises(ActivityIsNotLinkedWithThisCourseError):
            self.students_only_course.remove_activity(activity)
        self.assertNotIn(activity, self.students_only_course.activities)
        self.assertEqual(0, self.students_only_course.course_activities.count())
def test_remove_activity(self):
    """remove_activity() deletes the link between the course and the activity."""
    activity = Activity.objects.create(
        name="An activity",
        description="An activity description",
        author=get_user_model().objects.get(pk=1)
    )
    CourseActivity.objects.create(
        course=self.students_only_course, activity=activity, rank=5
    )
    # Sanity-check the link exists before removal.
    self.assertIn(activity, self.students_only_course.activities)
    self.assertEqual(1, self.students_only_course.course_activities.count())
    self.students_only_course.remove_activity(activity)
    self.assertNotIn(activity, self.students_only_course.activities)
    self.assertEqual(0, self.students_only_course.course_activities.count())
"""
Property activities
"""
def test_activities(self):
    """The ``activities`` property yields Activity instances in rank order.

    Four links with ranks 1..4 are created; iterating ``activities`` is
    expected to follow that ordering (each yielded activity's link rank
    must match the running counter).
    """
    CourseActivity.objects.create(
        rank=1, course=self.students_only_course, activity=self.activity1
    )
    CourseActivity.objects.create(
        rank=2, course=self.students_only_course, activity=self.activity2
    )
    CourseActivity.objects.create(
        rank=3, course=self.students_only_course, activity=self.activity3
    )
    CourseActivity.objects.create(
        rank=4, course=self.students_only_course, activity=self.activity4
    )
    self.assertEqual(4, self.students_only_course.course_activities.count())
    rank = 1
    for activity in self.students_only_course.activities:
        self.assertIsInstance(activity, Activity)
        self.assertEqual(rank, CourseActivity.objects.filter(
            course=self.students_only_course, activity=activity
        ).get().rank)
        rank += 1
def test_course_activities_ordered_by_rank(self):
    """``course_activities`` must be ordered by ascending rank by default."""
    CourseActivity.objects.create(
        rank=1, course=self.students_only_course, activity=self.activity1
    )
    CourseActivity.objects.create(
        rank=2, course=self.students_only_course, activity=self.activity2
    )
    CourseActivity.objects.create(
        rank=3, course=self.students_only_course, activity=self.activity3
    )
    CourseActivity.objects.create(
        rank=4, course=self.students_only_course, activity=self.activity4
    )
    self.assertEqual(4, self.students_only_course.course_activities.count())
    rank = 1
    for course_activity in self.students_only_course.course_activities.all():
        self.assertEqual(rank, course_activity.rank)
        rank += 1
"""
Property read_only
"""
def test_read_only(self):
    """Archiving a course of any access level must make it read-only."""
    courses = (
        self.public_course,
        self.collaborators_only_course,
        self.students_only_course,
        self.private_course,
    )
    for course in courses:
        course.state = CourseState.ARCHIVED.name
        self.assertTrue(course.read_only)
"""
Others
"""
def test_order_in_course_access(self):
    """CourseAccess must be totally ordered:
    PUBLIC < STUDENTS_ONLY < COLLABORATORS_ONLY < PRIVATE.
    """
    ordering = [
        CourseAccess.PUBLIC,
        CourseAccess.STUDENTS_ONLY,
        CourseAccess.COLLABORATORS_ONLY,
        CourseAccess.PRIVATE,
    ]
    # Every member must compare strictly below every later member, and
    # strictly above every earlier one.
    for position, lower in enumerate(ordering):
        for higher in ordering[position + 1:]:
            self.assertLess(lower, higher)
            self.assertGreater(higher, lower)
    # Reflexive comparisons: ==, >= and <= all hold on each member itself.
    for func in [self.assertEqual, self.assertGreaterEqual, self.assertLessEqual]:
        for member in ordering:
            func(member, member)
def test_reorder_activities(self):
    """reorder_course_activities() rewrites ranks to a dense 1..n sequence.

    Assumes fixtures ``self.ca1``..``self.ca3`` were created with ranks
    10/20/30 in CourseTestCase — TODO confirm against the setUp not
    visible here.
    """
    # Check that previous order is not changed
    for rank in range(1, 4):
        self.assertEqual(getattr(self, 'ca{}'.format(rank)).rank, rank * 10)
    # Reorder activities
    self.public_course.reorder_course_activities()
    # Check that new order is properly set
    rank = 1
    for ca in CourseActivity.objects.filter(course=self.public_course).all():
        self.assertEqual(ca.rank, rank)
        rank += 1
def test_reorder_activities_nothing_to_do(self):
    """Reordering an already-dense 1..n rank sequence is a no-op."""
    # Reorder manually before calling the method
    rank = 1
    for ca in CourseActivity.objects.filter(course=self.public_course).all():
        ca.rank = rank
        rank += 1
        ca.save()
    # Check that reordering is correct
    rank = 1
    for ca in CourseActivity.objects.filter(course=self.public_course).all():
        self.assertEqual(ca.rank, rank)
        rank += 1
    # Reorder activities
    self.public_course.reorder_course_activities()
    # Check that new order is not changed
    rank = 1
    for ca in CourseActivity.objects.filter(course=self.public_course).all():
        self.assertEqual(ca.rank, rank)
        rank += 1
"""
Method clean
"""
def test_clean_error_registration_on_draft(self):
    """clean() rejects registration being enabled on a DRAFT course."""
    self.public_course.registration_enabled = True
    self.public_course.state = CourseState.DRAFT.name
    with self.assertRaises(ValidationError):
        self.public_course.clean()
def test_clean_error_registration_on_archived(self):
    """clean() rejects registration being enabled on an ARCHIVED course."""
    self.public_course.registration_enabled = True
    self.public_course.state = CourseState.ARCHIVED.name
    with self.assertRaises(ValidationError):
        self.public_course.clean()
def test_clean_error_access_private_state_published(self):
    """clean() rejects a PUBLISHED course with PRIVATE access."""
    self.public_course.state = CourseState.PUBLISHED.name
    self.public_course.access = CourseAccess.PRIVATE.name
    with self.assertRaises(ValidationError):
        self.public_course.clean()
def test_clean_error_access_collaborators_only_state_published(self):
    """clean() rejects a PUBLISHED course with COLLABORATORS_ONLY access."""
    self.public_course.state = CourseState.PUBLISHED.name
    self.public_course.access = CourseAccess.COLLABORATORS_ONLY.name
    with self.assertRaises(ValidationError):
        self.public_course.clean()
def test_clean_error_author_in_students(self):
    """clean() rejects the course author also being one of its students."""
    user = self.public_course.author
    self.public_course.students.add(user)
    with self.assertRaises(ValidationError):
        self.public_course.clean()
def test_clean_error_author_in_collaborators(self):
    """clean() rejects the course author also being a collaborator."""
    user = self.public_course.author
    CourseCollaborator.objects.create(
        collaborator=user,
        course=self.public_course,
        role=CollaboratorRole.TEACHER.name
    )
    with self.assertRaises(ValidationError):
        self.public_course.clean()
def test_clean_access_students_only_state_published(self):
    """clean() accepts a PUBLISHED course with STUDENTS_ONLY access (no raise)."""
    self.public_course.state = CourseState.PUBLISHED.name
    self.public_course.access = CourseAccess.STUDENTS_ONLY.name
    self.public_course.clean()
def test_clean_access_public_state_published(self):
    """clean() accepts a PUBLISHED course with PUBLIC access (no raise)."""
    self.public_course.state = CourseState.PUBLISHED.name
    self.public_course.access = CourseAccess.PUBLIC.name
    self.public_course.clean()
class TestCourseManager(CourseTestCase):
    """Tests for the custom Course manager/queryset helpers.

    Covers public(), written_by(), taught_by(), followed_by() and
    recommendations_for(), each with and without the textual ``query``
    filter.  Fixture courses (public / students-only / collaborators-only /
    private) come from CourseTestCase, which is not visible in this chunk.
    """

    def test_get_public_courses(self):
        """public() returns exactly the courses whose access is PUBLIC.

        Each non-public fixture course is flipped to PUBLIC in turn and the
        result set is checked to grow accordingly.
        """
        self.assertEqual(self.public_course.access, CourseAccess.PUBLIC.name)
        self.assertEqual(1, Course.objects.public().count())
        self.assertIn(self.public_course, Course.objects.public().all())
        self.assertNotIn(self.students_only_course, Course.objects.public().all())
        self.assertNotIn(self.collaborators_only_course, Course.objects.public().all())
        self.assertNotIn(self.private_course, Course.objects.public().all())
        self.students_only_course.access = CourseAccess.PUBLIC.name
        self.students_only_course.save()
        self.assertEqual(2, Course.objects.public().count())
        self.assertIn(self.public_course, Course.objects.public().all())
        self.assertIn(self.students_only_course, Course.objects.public().all())
        self.assertNotIn(self.collaborators_only_course, Course.objects.public().all())
        self.assertNotIn(self.private_course, Course.objects.public().all())
        self.collaborators_only_course.access = CourseAccess.PUBLIC.name
        self.collaborators_only_course.save()
        self.assertEqual(3, Course.objects.public().count())
        self.assertIn(self.public_course, Course.objects.public().all())
        self.assertIn(self.students_only_course, Course.objects.public().all())
        self.assertIn(self.collaborators_only_course, Course.objects.public().all())
        self.assertNotIn(self.private_course, Course.objects.public().all())
        self.private_course.access = CourseAccess.PUBLIC.name
        self.private_course.save()
        self.assertEqual(4, Course.objects.public().count())
        self.assertIn(self.public_course, Course.objects.public().all())
        self.assertIn(self.students_only_course, Course.objects.public().all())
        self.assertIn(self.collaborators_only_course, Course.objects.public().all())
        self.assertIn(self.private_course, Course.objects.public().all())

    def test_get_public_course_filter(self):
        """public(query=...) filters by text, case-insensitively.

        The filter matching "public"/"PUBLIC" presumably targets the course
        name — confirm against the manager implementation.
        """
        self.assertEqual(self.public_course.access, CourseAccess.PUBLIC.name)
        self.students_only_course.access = CourseAccess.PUBLIC.name
        self.students_only_course.save()
        self.collaborators_only_course.access = CourseAccess.PUBLIC.name
        self.collaborators_only_course.save()
        self.private_course.access = CourseAccess.PUBLIC.name
        self.private_course.save()
        self.assertEqual(4, Course.objects.public().count())
        self.assertEqual(1, Course.objects.public(query="public").count())
        self.assertIn(self.public_course, Course.objects.public(query="public"))
        self.assertEqual(1, Course.objects.public(query="PUBLIC").count())
        self.assertIn(self.public_course, Course.objects.public(query="PUBLIC"))

    def test_get_written_by_courses(self):
        """written_by(user) returns courses authored by that user."""
        user = get_user_model().objects.get(pk=1)
        user_2 = get_user_model().objects.get(pk=2)
        self.assertEqual(4, Course.objects.written_by(user).count())
        # Transferring authorship removes the course from the original
        # author's result set.
        self.collaborators_only_course.author = user_2
        self.collaborators_only_course.save()
        self.assertEqual(3, Course.objects.written_by(user).count())
        self.assertNotIn(self.collaborators_only_course, Course.objects.written_by(user).all())

    def test_get_written_by_courses_filter(self):
        """written_by(user, query=...) narrows results case-insensitively."""
        user = get_user_model().objects.get(pk=1)
        self.assertEqual(4, Course.objects.written_by(user).count())
        self.assertEqual(1, Course.objects.written_by(user, query="public").count())
        self.assertIn(self.public_course, Course.objects.written_by(user, query="public"))
        self.assertEqual(1, Course.objects.written_by(user, query="PUBLIC").count())
        self.assertIn(self.public_course, Course.objects.written_by(user, query="PUBLIC"))

    def test_get_taught_by(self):
        """taught_by(user) includes authored courses and TEACHER collaborations."""
        teacher = get_user_model().objects.get(pk=2)
        self.collaborators_only_course.author = teacher
        self.collaborators_only_course.save()
        CourseCollaborator.objects.create(collaborator=teacher, course=self.public_course,
                                          role=CollaboratorRole.TEACHER.name)
        self.assertEqual(2, Course.objects.taught_by(teacher).count())
        self.assertIn(self.public_course, Course.objects.taught_by(teacher).all())
        self.assertIn(self.collaborators_only_course, Course.objects.taught_by(teacher).all())

    def test_get_taught_by_fitler(self):
        """taught_by(user, query=...) narrows results case-insensitively.

        NOTE: "fitler" in the method name is a typo for "filter"; kept as-is
        to avoid changing the discovered test set.
        """
        teacher = get_user_model().objects.get(pk=2)
        self.collaborators_only_course.author = teacher
        self.collaborators_only_course.save()
        CourseCollaborator.objects.create(collaborator=teacher, course=self.public_course,
                                          role=CollaboratorRole.TEACHER.name)
        self.assertEqual(1, Course.objects.taught_by(teacher, query="public").count())
        self.assertIn(self.public_course, Course.objects.taught_by(teacher, query="public").all())
        self.assertEqual(1, Course.objects.taught_by(teacher, query="PUBLIC").count())
        self.assertIn(self.public_course, Course.objects.taught_by(teacher, query="PUBLIC").all())

    def test_get_followed_by(self):
        """followed_by(user) returns courses the user is registered on."""
        s1 = get_user_model().objects.get(pk=2)
        RegistrationOnCourse.objects.create(course=self.public_course, student=s1)
        RegistrationOnCourse.objects.create(course=self.students_only_course, student=s1)
        RegistrationOnCourse.objects.create(course=self.collaborators_only_course, student=s1)
        self.assertEqual(3, Course.objects.followed_by(s1).count())
        self.assertIn(self.public_course, Course.objects.followed_by(s1).all())
        self.assertIn(self.students_only_course, Course.objects.followed_by(s1).all())
        self.assertIn(self.collaborators_only_course, Course.objects.followed_by(s1).all())

    def test_get_followed_by_filter(self):
        """followed_by(user, query=...) narrows results case-insensitively."""
        s1 = get_user_model().objects.get(pk=2)
        RegistrationOnCourse.objects.create(course=self.public_course, student=s1)
        RegistrationOnCourse.objects.create(course=self.students_only_course, student=s1)
        RegistrationOnCourse.objects.create(course=self.collaborators_only_course, student=s1)
        self.assertEqual(3, Course.objects.followed_by(s1).count())
        self.assertEqual(1, Course.objects.followed_by(s1, query="public").count())
        self.assertIn(self.public_course, Course.objects.followed_by(s1, query="public").all())
        self.assertEqual(1, Course.objects.followed_by(s1, query="PUBLIC").count())
        self.assertIn(self.public_course, Course.objects.followed_by(s1, query="PUBLIC").all())

    def test_get_recommendations_for_public_published_course_no_link(self):
        """Only PUBLISHED + PUBLIC courses are recommended to an unrelated user."""
        user = get_user_model().objects.get(pk=4)
        self.public_course.state = CourseState.PUBLISHED.name
        self.public_course.access = CourseAccess.PUBLIC.name
        self.public_course.save()
        self.assertEqual(1, Course.objects.recommendations_for(user).count())
        self.assertIn(self.public_course, Course.objects.recommendations_for(user).all())
        # A draft course is never recommended, even when public.
        self.public_course.state = CourseState.DRAFT.name
        self.public_course.access = CourseAccess.PUBLIC.name
        self.public_course.save()
        self.assertEqual(0, Course.objects.recommendations_for(user).count())
        self.assertNotIn(self.public_course, Course.objects.recommendations_for(user).all())
        # A published but non-public course is not recommended either.
        self.public_course.state = CourseState.PUBLISHED.name
        self.public_course.access = CourseAccess.STUDENTS_ONLY.name
        self.public_course.save()
        self.assertEqual(0, Course.objects.recommendations_for(user).count())
        self.assertNotIn(self.public_course, Course.objects.recommendations_for(user).all())

    def test_get_recommendations_for_public_published_not_as_author(self):
        """A course is never recommended to its own author."""
        self.assertEqual(0, Course.objects.recommendations_for(self.public_course.author).count())

    def test_get_recommendations_for_public_published_not_as_student(self):
        """A course stops being recommended once the user registers on it."""
        student = get_user_model().objects.get(pk=4)
        self.assertEqual(1, Course.objects.recommendations_for(student).count())
        self.assertIn(self.public_course, Course.objects.recommendations_for(student).all())
        RegistrationOnCourse.objects.create(course=self.public_course, student=student)
        self.assertEqual(0, Course.objects.recommendations_for(student).count())
        self.assertNotIn(self.public_course, Course.objects.recommendations_for(student).all())

    def test_get_recommendations_for_public_published_not_as_teacher(self):
        """A course stops being recommended once the user collaborates on it."""
        teacher = get_user_model().objects.get(pk=4)
        self.assertEqual(1, Course.objects.recommendations_for(teacher).count())
        self.assertIn(self.public_course, Course.objects.recommendations_for(teacher).all())
        CourseCollaborator.objects.create(course=self.public_course, collaborator=teacher)
        self.assertEqual(0, Course.objects.recommendations_for(teacher).count())
        self.assertNotIn(self.public_course, Course.objects.recommendations_for(teacher).all())

    def test_get_recommendations_for_filter(self):
        """recommendations_for(user, query=...) narrows results case-insensitively."""
        user = get_user_model().objects.get(pk=5)
        self.public_course.state = CourseState.PUBLISHED.name
        self.public_course.access = CourseAccess.PUBLIC.name
        self.public_course.save()
        self.students_only_course.state = CourseState.PUBLISHED.name
        self.students_only_course.access = CourseAccess.PUBLIC.name
        self.students_only_course.save()
        self.collaborators_only_course.state = CourseState.PUBLISHED.name
        self.collaborators_only_course.access = CourseAccess.PUBLIC.name
        self.collaborators_only_course.save()
        self.private_course.state = CourseState.PUBLISHED.name
        self.private_course.access = CourseAccess.PUBLIC.name
        self.private_course.save()
        self.assertEqual(4, Course.objects.recommendations_for(user).count())
        self.assertEqual(1, Course.objects.recommendations_for(user, query="public").count())
        self.assertIn(self.public_course, Course.objects.recommendations_for(user, query="public").all())
        self.assertEqual(1, Course.objects.recommendations_for(user, query="PUBLIC").count())
        self.assertIn(self.public_course, Course.objects.recommendations_for(user, query="PUBLIC").all())
| 48.629553
| 121
| 0.707615
| 8,147
| 70,756
| 5.876396
| 0.034737
| 0.077702
| 0.100595
| 0.048251
| 0.919269
| 0.898987
| 0.866214
| 0.846433
| 0.825128
| 0.806078
| 0
| 0.005113
| 0.192888
| 70,756
| 1,454
| 122
| 48.662999
| 0.833211
| 0.030485
| 0
| 0.700893
| 0
| 0
| 0.070609
| 0.032215
| 0
| 0
| 0
| 0
| 0.333036
| 1
| 0.085714
| false
| 0
| 0.005357
| 0
| 0.094643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a92c681699acc5d78c71341e4ae5bf86b947290c
| 40
|
py
|
Python
|
g_mlp_gpt/__init__.py
|
onlyrico/g-mlp-gpt
|
efee809ed3d0c54845395534d07b957d5c8ef5b2
|
[
"MIT"
] | 79
|
2021-05-20T02:50:02.000Z
|
2022-01-12T09:33:35.000Z
|
g_mlp_gpt/__init__.py
|
onlyrico/g-mlp-gpt
|
efee809ed3d0c54845395534d07b957d5c8ef5b2
|
[
"MIT"
] | null | null | null |
g_mlp_gpt/__init__.py
|
onlyrico/g-mlp-gpt
|
efee809ed3d0c54845395534d07b957d5c8ef5b2
|
[
"MIT"
] | 6
|
2021-05-20T03:09:24.000Z
|
2021-11-21T03:47:55.000Z
|
from g_mlp_gpt.g_mlp_gpt import gMLPGPT
| 20
| 39
| 0.875
| 9
| 40
| 3.444444
| 0.666667
| 0.258065
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
8d1733e8060061ada9879756508e6866170ba7a0
| 95
|
py
|
Python
|
src/xl_transform/reader/__init__.py
|
Spark-Liang/ExcelTransformer
|
8e827a7c0b0e2cf4832d7b4346e763f31e578343
|
[
"MIT"
] | 2
|
2019-04-06T14:01:49.000Z
|
2019-12-26T13:12:09.000Z
|
src/xl_transform/reader/__init__.py
|
Spark-Liang/ExcelTransformer
|
8e827a7c0b0e2cf4832d7b4346e763f31e578343
|
[
"MIT"
] | null | null | null |
src/xl_transform/reader/__init__.py
|
Spark-Liang/ExcelTransformer
|
8e827a7c0b0e2cf4832d7b4346e763f31e578343
|
[
"MIT"
] | null | null | null |
from xl_transform.reader.DataFrameReader import *
from xl_transform.reader.FileReader import *
| 31.666667
| 49
| 0.852632
| 12
| 95
| 6.583333
| 0.583333
| 0.151899
| 0.379747
| 0.531646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084211
| 95
| 2
| 50
| 47.5
| 0.908046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a5e6699bffd4230135b7124c415060e668ca8ec7
| 97
|
py
|
Python
|
torch_ac_pnn/utils/__init__.py
|
winnieyangwannan/winnie-pnn
|
65f78a3f102679471546c898761c28d6ca522dfd
|
[
"MIT"
] | null | null | null |
torch_ac_pnn/utils/__init__.py
|
winnieyangwannan/winnie-pnn
|
65f78a3f102679471546c898761c28d6ca522dfd
|
[
"MIT"
] | null | null | null |
torch_ac_pnn/utils/__init__.py
|
winnieyangwannan/winnie-pnn
|
65f78a3f102679471546c898761c28d6ca522dfd
|
[
"MIT"
] | null | null | null |
from torch_ac_pnn.utils.dictlist import DictList
from torch_ac_pnn.utils.penv import ParallelEnv
| 32.333333
| 48
| 0.876289
| 16
| 97
| 5.0625
| 0.5625
| 0.222222
| 0.271605
| 0.345679
| 0.469136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082474
| 97
| 2
| 49
| 48.5
| 0.910112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
57329cd3cdb5bfc1f2a60885a430ac8c82117037
| 42
|
py
|
Python
|
demo/py/hello_world.py
|
racheldotey/uvmcomputes-docs
|
83b4773944e3d6c15ad9293fce7c521ceba4b8d7
|
[
"MIT"
] | null | null | null |
demo/py/hello_world.py
|
racheldotey/uvmcomputes-docs
|
83b4773944e3d6c15ad9293fce7c521ceba4b8d7
|
[
"MIT"
] | null | null | null |
demo/py/hello_world.py
|
racheldotey/uvmcomputes-docs
|
83b4773944e3d6c15ad9293fce7c521ceba4b8d7
|
[
"MIT"
] | null | null | null |
def hi() :
return "Hello world!"
hi()
| 10.5
| 25
| 0.547619
| 6
| 42
| 3.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 42
| 4
| 26
| 10.5
| 0.741935
| 0
| 0
| 0
| 0
| 0
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
9389ac7d1517b72986649feba1f39ed001fb0616
| 761
|
py
|
Python
|
Tree/treeInterface.py
|
amal029/DataStructuresAndAlgorithmsInPython
|
ccf36ae9e6d1ab8c2be09315f4ad6ac715e222fd
|
[
"MIT"
] | null | null | null |
Tree/treeInterface.py
|
amal029/DataStructuresAndAlgorithmsInPython
|
ccf36ae9e6d1ab8c2be09315f4ad6ac715e222fd
|
[
"MIT"
] | null | null | null |
Tree/treeInterface.py
|
amal029/DataStructuresAndAlgorithmsInPython
|
ccf36ae9e6d1ab8c2be09315f4ad6ac715e222fd
|
[
"MIT"
] | null | null | null |
import abc
class TreeInterface(abc.ABC):
@abc.abstractmethod
def element(self):
raise NotImplementedError('ADTs should implement this method')
@abc.abstractmethod
def root(self):
raise NotImplementedError('ADTs should implement this method')
@abc.abstractmethod
def parent(self, v):
raise NotImplementedError('ADTs should implement this method')
@abc.abstractmethod
def children(self, v):
raise NotImplementedError('ADTs should implement this method')
@abc.abstractmethod
def __str__(self):
raise NotImplementedError('ADTs should implement this method')
@abc.abstractmethod
def __iter__(self):
raise NotImplementedError('ADTs should implement this method')
| 26.241379
| 70
| 0.70565
| 80
| 761
| 6.6125
| 0.2625
| 0.192817
| 0.226843
| 0.385633
| 0.839319
| 0.839319
| 0.839319
| 0.839319
| 0.731569
| 0.731569
| 0
| 0
| 0.215506
| 761
| 28
| 71
| 27.178571
| 0.886097
| 0
| 0
| 0.6
| 0
| 0
| 0.260184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| false
| 0
| 0.05
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
93db57f2d19621a2aceb64cf6ad321a53137bd85
| 19,587
|
py
|
Python
|
dataset/cifar.py
|
jhcknzzm/SSFL-Benchmarking-Semi-supervised-Federated-Learning
|
b4ff89f5a6296cd10eb2cd5d8577725bc09577a8
|
[
"MIT"
] | 36
|
2020-08-27T02:58:40.000Z
|
2022-03-30T08:24:31.000Z
|
dataset/cifar.py
|
jhcknzzm/SSFL-Benchmarking-Semi-supervised-Federated-Learning
|
b4ff89f5a6296cd10eb2cd5d8577725bc09577a8
|
[
"MIT"
] | 4
|
2020-10-27T02:56:38.000Z
|
2021-11-27T04:21:19.000Z
|
dataset/cifar.py
|
jhcknzzm/SSFL-Benchmarking-Semi-supervised-Federated-Learning
|
b4ff89f5a6296cd10eb2cd5d8577725bc09577a8
|
[
"MIT"
] | 8
|
2020-11-16T08:17:29.000Z
|
2022-03-10T05:58:53.000Z
|
import logging
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
import copy
from .randaugment import RandAugmentMC
import random
import numpy as np
seed_value = 1
random.seed(seed_value)
np.random.seed(seed_value)
logger = logging.getLogger(__name__)
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
cifar100_mean = (0.5071, 0.4867, 0.4408)
cifar100_std = (0.2675, 0.2565, 0.2761)
normal_mean = (0.5, 0.5, 0.5)
normal_std = (0.5, 0.5, 0.5)
def get_cifar10(root, num_expand_x, num_expand_u,device_ids, server_idxs):
    """Build the labeled/unlabeled/test CIFAR-10 datasets for federated SSL.

    The server's index list (server_idxs) becomes the labeled split; each
    entry of device_ids becomes one unlabeled split with FixMatch-style
    weak/strong augmentation (TransformFix).

    Returns:
        (train_labeled_dataset, train_unlabeled_dataset_list,
         test_dataset, base_dataset)
    """
    # NOTE(review): the `root` parameter is immediately overridden, so
    # callers cannot choose the data directory — confirm this is intended.
    root='./data'
    transform_labeled = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32,
                              padding=int(32*0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    base_dataset = datasets.CIFAR10(root, train=True, download=False)
    # train_labeled_idxs, train_unlabeled_idxs = x_u_split(
    #     base_dataset.targets, num_expand_x, num_expand_u, device_ids,server_idxs)
    # Indices are taken as pre-split: labeled on the server, unlabeled on devices.
    train_labeled_idxs, train_unlabeled_idxs = server_idxs, device_ids
    train_labeled_dataset = CIFAR10SSL(
        root, train_labeled_idxs, train=True,
        transform=transform_labeled)
    train_unlabeled_dataset_list = []
    # One unlabeled dataset per federated device.
    for id in range(len(train_unlabeled_idxs)):
        train_unlabeled_dataset = CIFAR10SSL(
            root, train_unlabeled_idxs[id], train=True,
            transform=TransformFix(mean=cifar10_mean, std=cifar10_std))
        train_unlabeled_dataset_list.append(train_unlabeled_dataset)
    test_dataset = datasets.CIFAR10(
        root, train=False, transform=transform_val, download=False)
    logger.info("Dataset: CIFAR10")
    return train_labeled_dataset, train_unlabeled_dataset_list, test_dataset, base_dataset
def get_cifar10_semi(root, num_expand_x, num_expand_u,device_ids, server_idxs):
    """Build per-device labeled AND unlabeled CIFAR-10 datasets (semi setting).

    Unlike get_cifar10(), the labeled indices are split across devices by
    x_u_split_semi_cifar(), so the function returns a list of labeled
    datasets (one per device) instead of a single one.

    Returns:
        (train_labeled_dataset_list, train_unlabeled_dataset_list,
         test_dataset, base_dataset)
    """
    # NOTE(review): the `root` parameter is overridden, as in the sibling loaders.
    root='./data'
    transform_labeled = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32,
                              padding=int(32*0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    base_dataset = datasets.CIFAR10(root, train=True, download=False)
    train_labeled_idxs, train_unlabeled_idxs = x_u_split_semi_cifar(
        base_dataset.targets, num_expand_x, num_expand_u, device_ids, server_idxs)
    # train_labeled_idxs, train_unlabeled_idxs = server_idxs, device_ids
    train_unlabeled_dataset_list = []
    train_labeled_dataset_list = []
    for id in range(len(train_unlabeled_idxs)):
        print(id)  # NOTE(review): debug print left in — consider logger.debug
        train_unlabeled_dataset = CIFAR10SSL(
            root, train_unlabeled_idxs[id], train=True,
            transform=TransformFix(mean=cifar10_mean, std=cifar10_std))
        train_labeled_dataset = CIFAR10SSL(
            root, train_labeled_idxs[id], train=True,
            transform=transform_labeled)
        train_unlabeled_dataset_list.append(train_unlabeled_dataset)
        train_labeled_dataset_list.append(train_labeled_dataset)
    test_dataset = datasets.CIFAR10(
        root, train=False, transform=transform_val, download=False)
    logger.info("Dataset: CIFAR10")
    return train_labeled_dataset_list, train_unlabeled_dataset_list, test_dataset, base_dataset
def get_svhn(root, num_expand_x, num_expand_u,device_ids, server_idxs):
    """Build the labeled/unlabeled/test SVHN datasets for federated SSL.

    Mirrors get_cifar10(): server_idxs is the labeled split, each entry of
    device_ids an unlabeled split with FixMatch-style augmentation.

    Returns:
        (train_labeled_dataset, train_unlabeled_dataset_list,
         test_dataset, base_dataset)

    Cleanup: removed dead code — unused function-local ``import functools``
    / ``import operator`` and an unused deepcopy of the first device's
    indices.  Behavior is unchanged.
    """
    # NOTE(review): the `root` parameter is overridden, as in the sibling loaders.
    root='./data'
    transform_labeled = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32,
                              padding=int(32*0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        # NOTE(review): SVHN is normalized with CIFAR-10 statistics here —
        # confirm this is deliberate.
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar10_mean, std=cifar10_std)
    ])
    base_dataset = datasets.SVHN(root, split='train', download=False)
    # Indices are taken as pre-split: labeled on the server, unlabeled on devices.
    train_labeled_idxs, train_unlabeled_idxs = server_idxs, device_ids
    train_labeled_dataset = SVHNSSL(
        root, train_labeled_idxs, split='train',
        transform=transform_labeled)
    train_unlabeled_dataset_list = []
    for id in range(len(train_unlabeled_idxs)):
        train_unlabeled_dataset = SVHNSSL(
            root, train_unlabeled_idxs[id], split='train',
            transform=TransformFix(mean=cifar10_mean, std=cifar10_std))
        train_unlabeled_dataset_list.append(train_unlabeled_dataset)
    # NOTE(review): the evaluation dataset uses split='train', not 'test' —
    # this looks unintentional; confirm against callers before changing.
    test_dataset = datasets.SVHN(
        root, split='train', transform=transform_val, download=False)
    logger.info("Dataset: SVHN")
    return train_labeled_dataset, train_unlabeled_dataset_list, test_dataset, base_dataset
def get_cifar100(root, num_labeled, num_expand_x, num_expand_u):
    """Build the labeled/unlabeled/test CIFAR-100 datasets (non-federated).

    Returns (train_labeled_dataset, train_unlabeled_dataset, test_dataset).
    """
    transform_labeled = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32,
                              padding=int(32*0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar100_mean, std=cifar100_std)])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=cifar100_mean, std=cifar100_std)])
    base_dataset = datasets.CIFAR100(
        root, train=True, download=True)
    # NOTE(review): this call passes `num_classes=100`, but the x_u_split()
    # defined in this module takes (labels, num_expand_x, num_expand_u,
    # device_ids, server_idxs) — this would raise TypeError if executed.
    # Looks like a leftover from the upstream FixMatch code; verify before use.
    train_labeled_idxs, train_unlabeled_idxs = x_u_split(
        base_dataset.targets, num_classes=100)
    train_labeled_dataset = CIFAR100SSL(
        root, train_labeled_idxs, train=True,
        transform=transform_labeled)
    train_unlabeled_dataset = CIFAR100SSL(
        root, train_unlabeled_idxs, train=True,
        transform=TransformFix(mean=cifar100_mean, std=cifar100_std))
    test_dataset = datasets.CIFAR100(
        root, train=False, transform=transform_val, download=False)
    logger.info("Dataset: CIFAR100")
    logger.info(f"Labeled examples: {len(train_labeled_idxs)}"
                f" Unlabeled examples: {len(train_unlabeled_idxs)}")
    return train_labeled_dataset, train_unlabeled_dataset, test_dataset
def get_emnist(root, num_expand_x, num_expand_u,device_ids, server_idxs, attack_idxs=None):
    """Build the labeled/unlabeled/test EMNIST ('balanced') datasets.

    When attack_idxs is given, an extra labeled dataset over those indices
    is built and returned as well (used for attack/poisoning experiments —
    TODO confirm with callers).

    Returns:
        (train_labeled_dataset, train_unlabeled_dataset_list, test_dataset,
         [train_attack_dataset,] base_dataset) — the attack dataset is
        present only when attack_idxs is not None.
    """
    # NOTE(review): the `root` parameter is overridden, as in the sibling loaders.
    root='./data'
    transform_labeled = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=28,
                              padding=int(28*0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    base_dataset = datasets.EMNIST(root, train=True,split='balanced', download=True)
    # train_labeled_idxs, train_unlabeled_idxs = x_u_split(
    #     base_dataset.targets, num_expand_x, num_expand_u, device_ids,server_idxs)
    # Indices are taken as pre-split: labeled on the server, unlabeled on devices.
    train_labeled_idxs, train_unlabeled_idxs = server_idxs, device_ids
    train_labeled_dataset = EMNIST(
        root, train_labeled_idxs, train=True,
        transform=transform_labeled)
    if attack_idxs is not None:
        train_attack_dataset = EMNIST(
            root, attack_idxs, train=True,
            transform=transform_labeled)
    train_unlabeled_dataset_list = []
    print('len(train_unlabeled_idxs):',len(train_unlabeled_idxs))
    for id in range(len(train_unlabeled_idxs)):
        train_unlabeled_dataset = EMNIST(
            root, train_unlabeled_idxs[id], train=True,
            transform=TransformFix(size = 28, mean=(0.1307,), std=(0.3081,)))
        train_unlabeled_dataset_list.append(train_unlabeled_dataset)
    test_dataset = datasets.EMNIST(
        root, train=False,split='balanced', transform=transform_val, download=True)
    if attack_idxs is not None:
        return train_labeled_dataset, train_unlabeled_dataset_list, test_dataset, train_attack_dataset, base_dataset
    else:
        return train_labeled_dataset, train_unlabeled_dataset_list, test_dataset, base_dataset
def get_emnist_semi(root, num_expand_x, num_expand_u,device_ids, server_idxs):
    """Build per-device labeled AND unlabeled EMNIST datasets (semi setting).

    Indices are produced by x_u_split_semi(), which partitions the server's
    labeled indices across devices.

    Returns:
        (train_labeled_dataset_list, train_unlabeled_dataset_list,
         test_dataset)
    """
    # NOTE(review): the `root` parameter is overridden, as in the sibling loaders.
    root='./data'
    transform_labeled = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=28,
                              padding=int(28*0.125),
                              padding_mode='reflect'),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    base_dataset = datasets.EMNIST(root, train=True,split='balanced', download=True)
    train_labeled_idxs, train_unlabeled_idxs = x_u_split_semi(
        base_dataset.targets, num_expand_x, num_expand_u, device_ids, server_idxs)
    train_unlabeled_dataset_list = []
    train_labeled_dataset_list = []
    # NOTE(review): this deepcopy result is never used — dead code.
    train_unlabeled_idxs_tmp = copy.deepcopy(train_unlabeled_idxs[0])
    for id in range(len(train_unlabeled_idxs)):
        train_unlabeled_dataset = EMNIST(
            root, train_unlabeled_idxs[id], train=True,
            transform=TransformFix(size = 28, mean=(0.1307,), std=(0.3081,)))
        train_unlabeled_dataset_list.append(train_unlabeled_dataset)
        train_labeled_dataset = EMNIST(
            root, train_labeled_idxs[id], train=True,
            transform=transform_labeled)
        train_labeled_dataset_list.append(train_labeled_dataset)
    test_dataset = datasets.EMNIST(
        root, train=False,split='balanced', transform=transform_val, download=True)
    return train_labeled_dataset_list, train_unlabeled_dataset_list, test_dataset
def x_u_split(labels,
              num_expand_x,
              num_expand_u,
              device_ids,
              server_idxs):
    """Tile per-device unlabeled indices and the server's labeled indices.

    Each device's index list is expanded to num_expand_u // len(device_ids)
    entries and the server's labeled list to num_expand_x entries; when
    whole-list tiling undershoots the target, the remainder is filled by
    sampling with replacement.

    Returns:
        (labeled_idx, unlabeled_idx_list) — one ndarray for the server, a
        list of ndarrays with one entry per device.
    """
    labels = np.array(labels)  # kept for API parity; not otherwise used
    n_devices = len(device_ids)

    def _tile_to(indices, target):
        # Repeat `indices` whole a floor-division number of times, then pad
        # randomly (with replacement) up to exactly `target` entries.
        tiled = np.hstack([indices for _ in range(target // len(indices))])
        if len(tiled) < target:
            extra = np.random.choice(tiled, target - len(tiled))
            tiled = np.hstack((tiled, extra))
        else:
            assert len(tiled) == target
        return tiled

    unlabeled_idx_list = [
        _tile_to(device_ids[device], num_expand_u // n_devices)
        for device in range(n_devices)
    ]
    labeled_idx = _tile_to(copy.deepcopy(server_idxs), num_expand_x)
    return labeled_idx, unlabeled_idx_list
def x_u_split_semi(labels,
                   num_expand_x,
                   num_expand_u,
                   device_ids,
                   server_idxs):
    """Shard the server's labeled indices across devices and expand both index sets.

    The labeled ``server_idxs`` are split into ``len(device_ids)`` contiguous
    shards (the last shard absorbs the division remainder). Both each shard and
    each device's unlabeled index list are then tiled/resampled up to
    ``num_expand_u // len(device_ids)`` entries.

    Bug fix: the original built the shards with a loop over
    ``range(len(device_ids) - 1)`` and then read the loop variable after the
    loop, which raised ``NameError`` whenever there was exactly one device.
    The shards are now computed by explicit slicing, which also covers the
    single-device case.

    Returns
    -------
    (labeled_idx_list, unlabeled_idx_list)
        One expanded array per device in each list.
    """
    n_devices = len(device_ids)
    per_device = num_expand_u // n_devices

    # Contiguous shards of the labeled pool, one per device; the last shard
    # takes whatever remains after integer division.
    num = len(server_idxs) // n_devices
    server_semi_idxs = [
        server_idxs[k * num:(k + 1) * num] for k in range(n_devices - 1)
    ]
    server_semi_idxs.append(server_idxs[(n_devices - 1) * num:])

    def _expand(base):
        """Tile `base` up to `per_device` entries, padding by random resampling."""
        reps = num_expand_u // len(base) // n_devices
        tiled = np.hstack([base] * reps)
        shortfall = per_device - len(tiled)
        if shortfall > 0:
            tiled = np.hstack((tiled, np.random.choice(tiled, shortfall)))
        else:
            assert len(tiled) == per_device
        return tiled

    unlabeled_idx_list = [_expand(dev_idxs) for dev_idxs in device_ids]
    labeled_idx_list = [_expand(shard) for shard in server_semi_idxs]
    return labeled_idx_list, unlabeled_idx_list
def x_u_split_semi_cifar(labels,
                         num_expand_x,
                         num_expand_u,
                         device_ids,
                         server_idxs):
    """Expand per-device labeled and unlabeled index sets to a common size.

    Unlike :func:`x_u_split_semi`, ``server_idxs`` here is already a per-device
    list of labeled index lists. Both labeled and unlabeled sets for device ``k``
    are tiled ``num_expand_u // len(device_ids[k]) // len(device_ids)`` times
    (note: the labeled expansion intentionally reuses the *unlabeled* set's
    length, as in the original) and padded to
    ``num_expand_u // len(device_ids)`` entries by random resampling.

    Returns
    -------
    (labeled_idx_list, unlabeled_idx_list)
        One expanded array per device in each list.
    """
    n_devices = len(device_ids)
    target = num_expand_u // n_devices

    def _tile(base, reps):
        """Repeat `base` `reps` times, then pad or assert to `target` length."""
        out = np.hstack([base] * reps)
        if len(out) < target:
            out = np.hstack((out, np.random.choice(out, target - len(out))))
        else:
            assert len(out) == target
        return out

    unlabeled_idx_list = [
        _tile(device_ids[k], num_expand_u // len(device_ids[k]) // n_devices)
        for k in range(n_devices)
    ]
    labeled_idx_list = [
        _tile(server_idxs[k], num_expand_u // len(device_ids[k]) // n_devices)
        for k in range(n_devices)
    ]
    return labeled_idx_list, unlabeled_idx_list
class TransformFix(object):
    """FixMatch-style paired augmentation.

    Calling an instance on a PIL image returns two normalized tensors:
    a weakly augmented view (flip + padded random crop) and a strongly
    augmented view (same, plus RandAugment with n=2, m=10).
    """

    def __init__(self, mean, std, size=32):
        pad = int(size * 0.125)
        self.weak = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(size=size,
                                  padding=pad,
                                  padding_mode='reflect'),
        ])
        self.strong = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(size=size,
                                  padding=pad,
                                  padding_mode='reflect'),
            RandAugmentMC(n=2, m=10),
        ])
        self.normalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])

    def __call__(self, x):
        """Return (weak_view, strong_view), both normalized."""
        return self.normalize(self.weak(x)), self.normalize(self.strong(x))
class CIFAR10SSL(datasets.CIFAR10):
    """CIFAR-10 restricted to a given subset of sample indices."""

    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=False):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        if indexs is not None:
            # keep only the selected rows of the dataset
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]

    def __getitem__(self, index):
        """Return the (transformed) image and target at `index`."""
        raw, label = self.data[index], self.targets[index]
        pil_img = Image.fromarray(raw)
        if self.transform is not None:
            pil_img = self.transform(pil_img)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return pil_img, label
class EMNIST(datasets.EMNIST):
    """EMNIST restricted to a given subset of sample indices.

    Bug fix: the ``split`` argument was previously accepted but ignored —
    ``'balanced'`` was always forwarded to the parent class. It is now
    honored; the default is unchanged, so existing callers are unaffected.
    """

    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=True, split='balanced'):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         split=split,  # was hard-coded to 'balanced'
                         download=download)
        if indexs is not None:
            # keep only the selected samples
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]

    def __getitem__(self, index):
        """Return the (transformed) image and target at `index`."""
        img, target = self.data[index], self.targets[index]
        # EMNIST stores images as torch tensors; convert to numpy for PIL
        img = img.cpu().numpy()
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            # NOTE(review): when `indexs` was given, `self.targets` is a numpy
            # array and `.cpu()` would fail here — presumably this branch is
            # only used without index filtering; confirm against callers.
            target = target.cpu().numpy()
            target = self.target_transform(target)
        return img, target
class CIFAR100SSL(datasets.CIFAR100):
    """CIFAR-100 restricted to a given subset of sample indices."""

    def __init__(self, root, indexs, train=True,
                 transform=None, target_transform=None,
                 download=False):
        super().__init__(root, train=train,
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        if indexs is not None:
            # keep only the selected rows of the dataset
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]

    def __getitem__(self, index):
        """Return the (transformed) image and target at `index`."""
        raw, label = self.data[index], self.targets[index]
        pil_img = Image.fromarray(raw)
        if self.transform is not None:
            pil_img = self.transform(pil_img)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return pil_img, label
class SVHNSSL(datasets.SVHN):
    """SVHN restricted to a given subset of sample indices.

    Bug fix: the ``split`` argument was previously accepted but ignored —
    ``'train'`` was always forwarded to the parent class. It is now honored;
    the default is unchanged, so existing callers are unaffected.
    """

    def __init__(self, root, indexs, split='train',
                 transform=None, target_transform=None,
                 download=False):
        super().__init__(root, split=split,  # was hard-coded to 'train'
                         transform=transform,
                         target_transform=target_transform,
                         download=download)
        if indexs is not None:
            # keep only the selected samples
            self.data = self.data[indexs]
            self.labels = np.array(self.labels)[indexs]

    def __getitem__(self, index):
        """Return the (transformed) image and integer label at `index`."""
        img, target = self.data[index], int(self.labels[index])
        # SVHN stores images channel-first (C, H, W); PIL expects (H, W, C)
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
| 36.071823
| 116
| 0.649768
| 2,331
| 19,587
| 5.145002
| 0.060918
| 0.065371
| 0.02835
| 0.021679
| 0.873926
| 0.859335
| 0.845493
| 0.831568
| 0.811223
| 0.802468
| 0
| 0.022277
| 0.252872
| 19,587
| 542
| 117
| 36.138376
| 0.797253
| 0.023332
| 0
| 0.757869
| 0
| 0
| 0.017937
| 0.004079
| 0
| 0
| 0
| 0
| 0.014528
| 1
| 0.046005
| false
| 0
| 0.026634
| 0
| 0.121065
| 0.004843
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f5765f9948d34b09224e0da6707aff9176f76365
| 78,206
|
py
|
Python
|
obp/ope/estimators_multi.py
|
Tanvikapoor14/zr-obp
|
51eba00f0dda5c26c1fa6826f544c60de485da52
|
[
"Apache-2.0"
] | null | null | null |
obp/ope/estimators_multi.py
|
Tanvikapoor14/zr-obp
|
51eba00f0dda5c26c1fa6826f544c60de485da52
|
[
"Apache-2.0"
] | null | null | null |
obp/ope/estimators_multi.py
|
Tanvikapoor14/zr-obp
|
51eba00f0dda5c26c1fa6826f544c60de485da52
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Off-Policy Estimators."""
from abc import ABCMeta
from abc import abstractmethod
from dataclasses import dataclass
from typing import Dict
from typing import Optional
import numpy as np
from sklearn.utils import check_scalar
from ..utils import check_array
from ..utils import check_multi_loggers_ope_inputs
from ..utils import estimate_confidence_interval_by_bootstrap
@dataclass
class BaseMultiLoggersOffPolicyEstimator(metaclass=ABCMeta):
    """Abstract interface shared by all multi-logger OPE estimators."""

    @abstractmethod
    def _estimate_round_rewards(self) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards."""
        raise NotImplementedError

    @abstractmethod
    def estimate_policy_value(self) -> float:
        """Estimate the policy value of evaluation policy."""
        raise NotImplementedError

    @abstractmethod
    def estimate_interval(self) -> Dict[str, float]:
        """Estimate the confidence interval of the policy value using bootstrap."""
        raise NotImplementedError
@dataclass
class MultiLoggersNaiveInverseProbabilityWeighting(BaseMultiLoggersOffPolicyEstimator):
    """Multi-Loggers Inverse Probability Weighting (Multi-IPW) Estimator.

    Note
    -------
    This estimator is called Naive IPS in Agarwal et al.(2018) and Averaged IS in Kallus et al.(2021).

    Multi-IPW estimates the policy value of evaluation policy :math:`\\pi_e`
    from logged data collected by multiple logging/behavior policies as

    .. math::
        \\hat{V}_{\\mathrm{Multi-IPW}} (\\pi_e; \\mathcal{D}) := \\mathbb{E}_{n} [ w_{k_i}(x_i,a_i) r_i],

    where :math:`w_k(x,a):=\\pi_e (a|x)/\\pi_k (a|x)` is the importance weight
    computed for the k-th behavior policy and :math:`k_i` indexes the policy that
    generated the i-th sample. When clipping is applied, large importance weights
    are clipped at the hyperparameter :math:`\\lambda (>0)`.

    Parameters
    ------------
    lambda_: float, default=np.inf
        A maximum possible value of the importance weight.
        When a positive finite value is given, importance weights larger than `lambda_` will be clipped.

    use_estimated_pscore: bool, default=False.
        If True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.

    estimator_name: str, default='multi_ipw'.
        Name of the estimator.

    References
    ------------
    Aman Agarwal, Soumya Basu, Tobias Schnabel, and Thorsten Joachims.
    "Effective Evaluation using Logged Bandit Feedback from Multiple Loggers.", 2018.

    Nathan Kallus, Yuta Saito, and Masatoshi Uehara.
    "Optimal Off-Policy Evaluation from Multiple Logging Policies.", 2021.
    """

    lambda_: float = np.inf
    use_estimated_pscore: bool = False
    estimator_name: str = "multi_ipw"

    def __post_init__(self) -> None:
        """Validate the hyperparameters."""
        check_scalar(
            self.lambda_,
            name="lambda_",
            target_type=(int, float),
            min_val=0.0,
        )
        # NaN is the only float that compares unequal to itself
        if self.lambda_ != self.lambda_:
            raise ValueError("`lambda_` must not be nan")
        if not isinstance(self.use_estimated_pscore, bool):
            raise TypeError(
                f"`use_estimated_pscore` must be a bool, but {type(self.use_estimated_pscore)} is given"
            )

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Return the sample-wise IPW terms :math:`w_{k_i}(x_i,a_i) r_i`.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Observed rewards, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy, i.e., :math:`a_i`.

        pscore: array-like, shape (n_rounds,)
            Propensity scores of the logging/behavior policy, i.e., :math:`\\pi_k(a_i|x_i)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy, i.e., :math:`\\pi_e(a_i|x_i)`.

        position: array-like, shape (n_rounds,), default=None
            Position indices in a recommendation interface; if None, position
            effects are ignored.

        Returns
        ----------
        estimated_rewards: array-like, shape (n_rounds,)
            Estimated rewards for each observation.
        """
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        sample_ids = np.arange(action.shape[0])
        importance_weight = action_dist[sample_ids, action, position] / pscore
        # weight clipping at lambda_
        if isinstance(importance_weight, np.ndarray):
            importance_weight = np.minimum(importance_weight, self.lambda_)
        return reward * importance_weight

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        pscore: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        estimated_pscore: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate the policy value of the evaluation policy.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Observed rewards, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy, i.e., :math:`a_i`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy, i.e., :math:`\\pi_e(a_i|x_i)`.

        pscore: array-like, shape (n_rounds,), default=None
            Propensity scores of the logging/behavior policy, i.e., :math:`\\pi_k(a_i|x_i)`.
            Required when `use_estimated_pscore` is False.

        position: array-like, shape (n_rounds,), default=None
            Position indices; if None, position effects are ignored.

        estimated_pscore: array-like, shape (n_rounds,), default=None
            Estimated propensity scores, i.e., :math:`\\hat{\\pi}_k(a_i|x_i)`.
            Required when `self.use_estimated_pscore` is True.

        Returns
        ----------
        V_hat: float
            Estimated policy value of evaluation policy.
        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        if self.use_estimated_pscore:
            check_array(array=estimated_pscore, name="estimated_pscore", expected_dim=1)
            behavior_pscore = estimated_pscore
        else:
            check_array(array=pscore, name="pscore", expected_dim=1)
            behavior_pscore = pscore
        check_multi_loggers_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=behavior_pscore,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        sample_rewards = self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore=behavior_pscore,
            action_dist=action_dist,
        )
        return sample_rewards.mean()

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        pscore: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        estimated_pscore: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate the confidence interval of the policy value using bootstrap.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Observed rewards, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy, i.e., :math:`a_i`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy, i.e., :math:`\\pi_e(a_i|x_i)`.

        pscore: array-like, shape (n_rounds,), default=None
            Propensity scores of the logging/behavior policy, i.e., :math:`\\pi_k(a_i|x_i)`.
            Required when `use_estimated_pscore` is False.

        position: array-like, shape (n_rounds,), default=None
            Position indices; if None, position effects are ignored.

        estimated_pscore: array-like, shape (n_rounds,), default=None
            Estimated propensity scores, i.e., :math:`\\hat{\\pi}_k(a_i|x_i)`.
            Required when `self.use_estimated_pscore` is True.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in bootstrap sampling.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.
        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        if self.use_estimated_pscore:
            check_array(array=estimated_pscore, name="estimated_pscore", expected_dim=1)
            behavior_pscore = estimated_pscore
        else:
            check_array(array=pscore, name="pscore", expected_dim=1)
            behavior_pscore = pscore
        check_multi_loggers_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=behavior_pscore,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        sample_rewards = self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore=behavior_pscore,
            action_dist=action_dist,
        )
        return estimate_confidence_interval_by_bootstrap(
            samples=sample_rewards,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class MultiLoggersBalancedInverseProbabilityWeighting(
    BaseMultiLoggersOffPolicyEstimator
):
    """Multi-Loggers Balanced Inverse Probability Weighting (Multi-Bal-IPW) Estimator.

    Note
    -------
    This estimator is called Balanced IPS in Agarwal et al.(2018) and Standard IS in Kallus et al.(2021).
    Note that this estimator is different from `obp.ope.BalancedInverseProbabilityWeighting`,
    which is for the standard OPE setting.

    Multi-Bal-IPW estimates the policy value of evaluation policy :math:`\\pi_e`
    using importance weights computed against the *average* behavior policy
    :math:`\\pi_{avg}(a|x) := \\sum_{k=1}^K \\rho_k \\pi_k(a|x)`:

    .. math::
        \\hat{V}_{\\mathrm{Multi-Bal-IPW}} (\\pi_e; \\mathcal{D}) := \\mathbb{E}_{n} [ w_{avg}(x_i,a_i) r_i],

    where :math:`w_{avg}(x,a):=\\pi_e (a|x)/\\pi_{avg} (a|x)` and
    :math:`\\rho_k := n_k / n` is the dataset proportion. When clipping is
    applied, large importance weights are clipped at :math:`\\lambda (>0)`.

    Bug fix: ``estimate_interval`` previously passed ``pscore=`` to
    ``_estimate_round_rewards``, whose required parameter is ``pscore_avg``;
    the stray keyword was swallowed by ``**kwargs`` and the call raised
    ``TypeError``. It now passes ``pscore_avg=``.

    Parameters
    ------------
    lambda_: float, default=np.inf
        A maximum possible value of the importance weight.
        When a positive finite value is given, importance weights larger than `lambda_` will be clipped.

    use_estimated_pscore: bool, default=False.
        If True, `estimated_pscore_avg` is used, otherwise, `pscore_avg` (the true propensity scores) is used.

    estimator_name: str, default='multi_bal_ipw'.
        Name of the estimator.

    References
    ------------
    Aman Agarwal, Soumya Basu, Tobias Schnabel, and Thorsten Joachims.
    "Effective Evaluation using Logged Bandit Feedback from Multiple Loggers.", 2018.

    Nathan Kallus, Yuta Saito, and Masatoshi Uehara.
    "Optimal Off-Policy Evaluation from Multiple Logging Policies.", 2021.
    """

    lambda_: float = np.inf
    use_estimated_pscore: bool = False
    estimator_name: str = "multi_bal_ipw"

    def __post_init__(self) -> None:
        """Validate the hyperparameters."""
        check_scalar(
            self.lambda_,
            name="lambda_",
            target_type=(int, float),
            min_val=0.0,
        )
        # NaN is the only float that compares unequal to itself
        if self.lambda_ != self.lambda_:
            raise ValueError("`lambda_` must not be nan")
        if not isinstance(self.use_estimated_pscore, bool):
            raise TypeError(
                f"`use_estimated_pscore` must be a bool, but {type(self.use_estimated_pscore)} is given"
            )

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore_avg: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Return the sample-wise IPW terms :math:`w_{avg}(x_i,a_i) r_i`.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Observed rewards, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy, i.e., :math:`a_i`.

        pscore_avg: array-like, shape (n_rounds,)
            Action choice probabilities of the average logging/behavior policy,
            i.e., :math:`\\pi_{avg}(a_i|x_i)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy, i.e., :math:`\\pi_e(a_i|x_i)`.

        position: array-like, shape (n_rounds,), default=None
            Position indices; if None, position effects are ignored.

        Returns
        ----------
        estimated_rewards: array-like, shape (n_rounds,)
            Estimated rewards for each observation.
        """
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        iw_avg = action_dist[np.arange(action.shape[0]), action, position] / pscore_avg
        # weight clipping at lambda_
        if isinstance(iw_avg, np.ndarray):
            iw_avg = np.minimum(iw_avg, self.lambda_)
        return reward * iw_avg

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        pscore_avg: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        estimated_pscore_avg: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate the policy value of the evaluation policy.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Observed rewards, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy, i.e., :math:`a_i`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy, i.e., :math:`\\pi_e(a_i|x_i)`.

        pscore_avg: array-like, shape (n_rounds,), default=None
            Action choice probabilities of the average logging/behavior policy,
            i.e., :math:`\\pi_{avg}(a_i|x_i)`. Required when `use_estimated_pscore` is False.

        position: array-like, shape (n_rounds,), default=None
            Position indices; if None, position effects are ignored.

        estimated_pscore_avg: array-like, shape (n_rounds,), default=None
            Estimated average logging/behavior policy, i.e., :math:`\\hat{\\pi}_{avg}(a_i|x_i)`.
            Required when `self.use_estimated_pscore` is True.

        Returns
        ----------
        V_hat: float
            Estimated policy value of evaluation policy.
        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        if self.use_estimated_pscore:
            check_array(
                array=estimated_pscore_avg, name="estimated_pscore_avg", expected_dim=1
            )
            pscore_ = estimated_pscore_avg
        else:
            check_array(array=pscore_avg, name="pscore_avg", expected_dim=1)
            pscore_ = pscore_avg
        check_multi_loggers_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore_,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        return self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore_avg=pscore_,
            action_dist=action_dist,
        ).mean()

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        pscore_avg: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        estimated_pscore_avg: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate the confidence interval of the policy value using bootstrap.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Observed rewards, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy, i.e., :math:`a_i`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy, i.e., :math:`\\pi_e(a_i|x_i)`.

        pscore_avg: array-like, shape (n_rounds,), default=None
            Action choice probabilities of the average logging/behavior policy,
            i.e., :math:`\\pi_{avg}(a_i|x_i)`. Required when `use_estimated_pscore` is False.

        position: array-like, shape (n_rounds,), default=None
            Position indices; if None, position effects are ignored.

        estimated_pscore_avg: array-like, shape (n_rounds,), default=None
            Estimated average logging/behavior policy, i.e., :math:`\\hat{\\pi}_{avg}(a_i|x_i)`.
            Required when `self.use_estimated_pscore` is True.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in bootstrap sampling.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.
        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        if self.use_estimated_pscore:
            check_array(
                array=estimated_pscore_avg, name="estimated_pscore_avg", expected_dim=1
            )
            pscore_ = estimated_pscore_avg
        else:
            check_array(array=pscore_avg, name="pscore_avg", expected_dim=1)
            pscore_ = pscore_avg
        check_multi_loggers_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore_,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        estimated_round_rewards = self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore_avg=pscore_,  # BUG FIX: was `pscore=pscore_` (TypeError)
            action_dist=action_dist,
        )
        return estimate_confidence_interval_by_bootstrap(
            samples=estimated_round_rewards,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class MultiLoggersWeightedInverseProbabilityWeighting(
MultiLoggersNaiveInverseProbabilityWeighting
):
"""Multi-Loggers Weighted Inverse Probability Weighting (Multi-Weighted-IPW) Estimator.
Note
-------
This estimator is called Weighted IPS in Agarwal et al.(2018) and Precision Weighted IS in Kallus et al.(2021).
Multi-Weighted-IPW estimates the policy value of evaluation policy :math:`\\pi_e`
using logged data collected by multiple logging/behavior policies as
.. math::
\\hat{V}_{\\mathrm{Multi-Weighted-IPW}} (\\pi_e; \\mathcal{D})
:= \\sum_{k=1}^K \\M^*_k \\mathbb{E}_{n_k} [ w_k(x_i,a_i) r_i],
where :math:`\\mathcal{D}_k=\\{(x_i,a_i,r_i)\\}_{i=1}^{n_k}` is logged bandit data with :math:`n_k` observations collected by
the k-th behavior policy :math:`\\pi_k`. :math:`w_k(x,a):=\\pi_e (a|x)/\\pi_k (a|x)` is the importance weight given :math:`x` and :math:`a` computed for the k-th behavior policy.
We can represent the whole logged bandit data as :math:`\\mathcal{D}=\\{(k_i,x_i,a_i,r_i)\\}_{i=1}^{n}` where :math:`k_i` is the index to indicate the logging/behavior policy that generates i-th data, i.e., :math:`\\pi_{k_i}`.
Note that :math:`n := \\sum_{k=1}^K` is the total number of logged bandit data, and :math:`\\rho_k := n_k / n` is the dataset proportions.
:math:`\\mathbb{E}_{n}[\\cdot]` is the empirical average over :math:`n` observations in :math:`\\mathcal{D}`.
When the clipping is applied, a large importance weight is clipped as :math:`\\hat{w}_k(x,a) := \\min \\{ \\lambda, w_k(x,a) \\}`, where :math:`\\lambda (>0)` is a hyperparameter to specify a maximum allowed importance weight.
Multi-Weighted-IPW prioritizes the strata generated by the logging/behavior policies similar to the evaluation policy.
The weight for the k-th logging/behavior policy :math:`\\M^*_k` is defined based on
the divergence between the evaluation policy :math:`\\pi_e` and :math:`\\pi_k`.
Parameters
------------
lambda_: float, default=np.inf
A maximum possible value of the importance weight.
When a positive finite value is given, importance weights larger than `lambda_` will be clipped.
use_estimated_pscore: bool, default=False.
If True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.
estimator_name: str, default='multi_weighted_ipw'.
Name of the estimator.
References
------------
Aman Agarwal, Soumya Basu, Tobias Schnabel, and Thorsten Joachims.
"Effective Evaluation using Logged Bandit Feedback from Multiple Loggers.", 2018.
Nathan Kallus, Yuta Saito, and Masatoshi Uehara.
"Optimal Off-Policy Evaluation from Multiple Logging Policies.", 2021.
"""
estimator_name: str = "multi_weighted_ipw"
def _estimate_round_rewards(
    self,
    reward: np.ndarray,
    action: np.ndarray,
    pscore: np.ndarray,
    stratum_idx: np.ndarray,
    action_dist: np.ndarray,
    position: Optional[np.ndarray] = None,
    **kwargs,
) -> np.ndarray:
    """Return sample-wise IPW terms re-weighted by per-stratum precision.

    Each stratum (i.e., each logging/behavior policy) receives a weight that is
    inversely proportional to the empirical variance of its IPW terms, so
    low-variance loggers contribute more to the final estimate.

    Parameters
    ----------
    reward: array-like, shape (n_rounds,)
        Observed rewards, i.e., :math:`r_i`.

    action: array-like, shape (n_rounds,)
        Actions sampled by the logging/behavior policy, i.e., :math:`a_i`.

    pscore: array-like, shape (n_rounds,)
        Propensity scores of the logging/behavior policy, i.e., :math:`\\pi_k(a_i|x_i)`.

    stratum_idx: array-like, shape (n_rounds,)
        Index of the logging/behavior policy that generated each sample, i.e., :math:`k`.

    action_dist: array-like, shape (n_rounds, n_actions, len_list)
        Action choice probabilities of the evaluation policy, i.e., :math:`\\pi_e(a_i|x_i)`.

    position: array-like, shape (n_rounds,), default=None
        Position indices; if None, position effects are ignored.

    Returns
    ----------
    estimated_rewards: array-like, shape (n_rounds,)
        Estimated rewards for each observation.
    """
    if position is None:
        position = np.zeros(action_dist.shape[0], dtype=int)
    n_total = action.shape[0]
    iw = action_dist[np.arange(n_total), action, position] / pscore
    # weight clipping at lambda_
    if isinstance(iw, np.ndarray):
        iw = np.minimum(iw, self.lambda_)
    strata, strata_sizes = np.unique(stratum_idx, return_counts=True)
    # empirical variance of the IPW terms within each stratum
    # NOTE(review): indexing `stratum_var[k]` by the stratum *value* assumes
    # stratum indices are contiguous 0..K-1 — confirm upstream callers.
    stratum_var = np.zeros(strata.shape[0])
    for k in strata:
        in_stratum = stratum_idx == k
        stratum_var[k] = np.var(reward[in_stratum] * iw[in_stratum])
    # precision-based stratum weights (zero variance would divide by zero here,
    # exactly as in the original implementation)
    stratum_weight = n_total / (stratum_var * np.sum(strata_sizes / stratum_var))
    return reward * iw * stratum_weight[stratum_idx]
def estimate_policy_value(
self,
reward: np.ndarray,
action: np.ndarray,
action_dist: np.ndarray,
stratum_idx: np.ndarray,
pscore: Optional[np.ndarray] = None,
position: Optional[np.ndarray] = None,
estimated_pscore: Optional[np.ndarray] = None,
**kwargs,
) -> np.ndarray:
"""Estimate the policy value of evaluation policy.
Parameters
----------
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
action_dist: array-like, shape (n_rounds, n_actions, len_list)
Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.
stratum_idx: array-like, shape (n_rounds,)
Indices to differentiate the logging/behavior policy that generate each data, i.e., :math:`k`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_k(a_i|x_i)`.
If `use_estimated_pscore` is False, `pscore` must be given.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, the effect of position on the reward will be ignored.
(If only a single action is chosen for each data, you can just ignore this argument.)
estimated_pscore: array-like, shape (n_rounds,), default=None
Estimated behavior policy (propensity scores), i.e., :math:`\\hat{\\pi}_k(a_i|x_i)`.
If `self.use_estimated_pscore` is True, `estimated_pscore` must be given.
Returns
----------
V_hat: float
Estimated policy value of evaluation policy.
"""
check_array(array=reward, name="reward", expected_dim=1)
check_array(array=action, name="action", expected_dim=1)
check_array(array=stratum_idx, name="stratum_idx", expected_dim=1)
if self.use_estimated_pscore:
check_array(array=estimated_pscore, name="estimated_pscore", expected_dim=1)
pscore_ = estimated_pscore
else:
check_array(array=pscore, name="pscore", expected_dim=1)
pscore_ = pscore
check_multi_loggers_ope_inputs(
action_dist=action_dist,
position=position,
action=action,
reward=reward,
stratum_idx=stratum_idx,
pscore=pscore_,
)
if position is None:
position = np.zeros(action_dist.shape[0], dtype=int)
return self._estimate_round_rewards(
reward=reward,
action=action,
position=position,
pscore=pscore_,
stratum_idx=stratum_idx,
action_dist=action_dist,
).mean()
def estimate_interval(
self,
reward: np.ndarray,
action: np.ndarray,
stratum_idx: np.ndarray,
action_dist: np.ndarray,
pscore: Optional[np.ndarray] = None,
position: Optional[np.ndarray] = None,
estimated_pscore: Optional[np.ndarray] = None,
alpha: float = 0.05,
n_bootstrap_samples: int = 10000,
random_state: Optional[int] = None,
**kwargs,
) -> Dict[str, float]:
"""Estimate the confidence interval of the policy value using bootstrap.
Parameters
----------
reward: array-like, shape (n_rounds,)
Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.
action: array-like, shape (n_rounds,)
Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.
action_dist: array-like, shape (n_rounds, n_actions, len_list)
Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.
stratum_idx: array-like, shape (n_rounds,)
Indices to differentiate the logging/behavior policy that generate each data, i.e., :math:`k_i`.
pscore: array-like, shape (n_rounds,), default=None
Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_k(a_i|x_i)`.
If `use_estimated_pscore` is False, `pscore` must be given.
position: array-like, shape (n_rounds,), default=None
Indices to differentiate positions in a recommendation interface where the actions are presented.
If None, the effect of position on the reward will be ignored.
(If only a single action is chosen for each data, you can just ignore this argument.)
estimated_pscore: array-like, shape (n_rounds,), default=None
Estimated behavior policy (propensity scores), i.e., :math:`\\hat{\\pi}_b(a_i|x_i)`.
If `self.use_estimated_pscore` is True, `estimated_pscore` must be given.
alpha: float, default=0.05
Significance level.
n_bootstrap_samples: int, default=10000
Number of resampling performed in bootstrap sampling.
random_state: int, default=None
Controls the random seed in bootstrap sampling.
Returns
----------
estimated_confidence_interval: Dict[str, float]
Dictionary storing the estimated mean and upper-lower confidence bounds.
"""
check_array(array=reward, name="reward", expected_dim=1)
check_array(array=action, name="action", expected_dim=1)
check_array(array=stratum_idx, name="stratum_idx", expected_dim=1)
if self.use_estimated_pscore:
check_array(array=estimated_pscore, name="estimated_pscore", expected_dim=1)
pscore_ = estimated_pscore
else:
check_array(array=pscore, name="pscore", expected_dim=1)
pscore_ = pscore
check_multi_loggers_ope_inputs(
action_dist=action_dist,
position=position,
action=action,
reward=reward,
stratum_idx=stratum_idx,
pscore=pscore_,
)
if position is None:
position = np.zeros(action_dist.shape[0], dtype=int)
estimated_round_rewards = self._estimate_round_rewards(
reward=reward,
action=action,
position=position,
stratum_idx=stratum_idx,
pscore=pscore_,
action_dist=action_dist,
)
return estimate_confidence_interval_by_bootstrap(
samples=estimated_round_rewards,
alpha=alpha,
n_bootstrap_samples=n_bootstrap_samples,
random_state=random_state,
)
@dataclass
class MultiLoggersNaiveDoublyRobust(BaseMultiLoggersOffPolicyEstimator):
    """Multi-Loggers Naive Doubly Robust (Multi-Naive-DR) Estimator.

    Note
    -------
    This estimator is called Average DR in Kallus et al.(2021).

    Multi-Naive-DR estimates the policy value of evaluation policy :math:`\\pi_e`
    using logged data collected by multiple logging/behavior policies as

    .. math::

        \\hat{V}_{\\mathrm{Multi-Naive-DR}} (\\pi_e; \\mathcal{D}, \\hat{q})
        := \\mathbb{E}_{n} [\\hat{q}(x_i,\\pi_e) + w_{k_i}(x_i,a_i) (r_i - \\hat{q}(x_i,a_i))],

    where :math:`\\mathcal{D}_k=\\{(x_i,a_i,r_i)\\}_{i=1}^{n_k}` is logged bandit data with :math:`n_k` observations collected by
    the k-th behavior policy :math:`\\pi_k`. :math:`w_k(x,a):=\\pi_e (a|x)/\\pi_k (a|x)` is the importance weight given :math:`x` and :math:`a` computed for the k-th behavior policy.
    We can represent the whole logged bandit data as :math:`\\mathcal{D}=\\{(k_i,x_i,a_i,r_i)\\}_{i=1}^{n}` where :math:`k_i` is the index to indicate the logging/behavior policy that generates i-th data, i.e., :math:`\\pi_{k_i}`.
    Note that :math:`n := \\sum_{k=1}^K n_k` is the total number of logged bandit data.
    :math:`\\mathbb{E}_{n}[\\cdot]` is the empirical average over :math:`n` observations in :math:`\\mathcal{D}`.
    :math:`\\hat{q} (x,a)` is the estimated expected reward given :math:`x` and :math:`a`.
    :math:`\\hat{q} (x_i,\\pi):= \\mathbb{E}_{a \\sim \\pi(a|x)}[\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\pi`.
    When the clipping is applied, a large importance weight is clipped as :math:`\\hat{w}_k(x,a) := \\min \\{ \\lambda, w_k(x,a) \\}`, where :math:`\\lambda (>0)` is a hyperparameter to specify a maximum allowed importance weight.

    Multi-Naive-DR applies the standard DR to each stratum and takes the weighted average of the K datasets.

    Parameters
    ------------
    lambda_: float, default=np.inf
        A maximum possible value of the importance weight.
        When a positive finite value is given, importance weights larger than `lambda_` will be clipped.

    use_estimated_pscore: bool, default=False.
        If True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.

    estimator_name: str, default='multi_dr'.
        Name of the estimator.

    References
    ------------
    Aman Agarwal, Soumya Basu, Tobias Schnabel, and Thorsten Joachims.
    "Effective Evaluation using Logged Bandit Feedback from Multiple Loggers.", 2018.

    Nathan Kallus, Yuta Saito, and Masatoshi Uehara.
    "Optimal Off-Policy Evaluation from Multiple Logging Policies.", 2021.

    """

    # importance-weight clip threshold (np.inf means no clipping)
    lambda_: float = np.inf
    # whether to use `estimated_pscore` instead of the true `pscore`
    use_estimated_pscore: bool = False
    estimator_name: str = "multi_dr"
    def __post_init__(self) -> None:
        """Initialize Class."""
        check_scalar(
            self.lambda_,
            name="lambda_",
            target_type=(int, float),
            min_val=0.0,
        )
        # nan is the only float value that is not equal to itself
        if self.lambda_ != self.lambda_:
            raise ValueError("`lambda_` must not be nan")
        if not isinstance(self.use_estimated_pscore, bool):
            raise TypeError(
                f"`use_estimated_pscore` must be a bool, but {type(self.use_estimated_pscore)} is given"
            )
    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_k(a_i|x_i)`.
            If `use_estimated_pscore` is False, `pscore` must be given.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

        position: array-like, shape (n_rounds,), default=None
            Indices to differentiate positions in a recommendation interface where the actions are presented.
            If None, the effect of position on the reward will be ignored.
            (If only a single action is chosen for each data, you can just ignore this argument.)

        Returns
        ----------
        estimated_rewards: array-like, shape (n_rounds,)
            Estimated rewards for each observation.

        """
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        # per-sample importance weight w_k(x_i, a_i) = pi_e(a_i|x_i) / pi_k(a_i|x_i)
        iw = action_dist[np.arange(action.shape[0]), action, position] / pscore
        # weight clipping
        if isinstance(iw, np.ndarray):
            iw = np.minimum(iw, self.lambda_)
        n = action.shape[0]
        q_hat_at_position = estimated_rewards_by_reg_model[np.arange(n), :, position]
        q_hat_factual = estimated_rewards_by_reg_model[np.arange(n), action, position]
        pi_e_at_position = action_dist[np.arange(n), :, position]
        # model-based baseline: q_hat(x_i, pi_e) = E_{a ~ pi_e}[q_hat(x_i, a)]
        estimated_rewards = np.average(
            q_hat_at_position,
            weights=pi_e_at_position,
            axis=1,
        )
        # importance-weighted correction term of DR
        estimated_rewards += iw * (reward - q_hat_factual)
        return estimated_rewards
    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        pscore: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        estimated_pscore: Optional[np.ndarray] = None,
        **kwargs,
    ) -> float:
        """Estimate the policy value of evaluation policy.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

        pscore: array-like, shape (n_rounds,), default=None
            Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_k(a_i|x_i)`.
            If `use_estimated_pscore` is False, `pscore` must be given.

        position: array-like, shape (n_rounds,), default=None
            Indices to differentiate positions in a recommendation interface where the actions are presented.
            If None, the effect of position on the reward will be ignored.
            (If only a single action is chosen for each data, you can just ignore this argument.)

        estimated_pscore: array-like, shape (n_rounds,), default=None
            Estimated behavior policy (propensity scores), i.e., :math:`\\hat{\\pi}_k(a_i|x_i)`.
            If `self.use_estimated_pscore` is True, `estimated_pscore` must be given.

        Returns
        ----------
        V_hat: float
            Estimated policy value of evaluation policy.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        if self.use_estimated_pscore:
            check_array(array=estimated_pscore, name="estimated_pscore", expected_dim=1)
            pscore_ = estimated_pscore
        else:
            check_array(array=pscore, name="pscore", expected_dim=1)
            pscore_ = pscore
        check_multi_loggers_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore_,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        # policy value = empirical mean of the sample-wise DR rewards (scalar)
        return self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore_,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        ).mean()
    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        pscore: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        estimated_pscore: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate the confidence interval of the policy value using bootstrap.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

        pscore: array-like, shape (n_rounds,), default=None
            Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_k(a_i|x_i)`.
            If `use_estimated_pscore` is False, `pscore` must be given.

        position: array-like, shape (n_rounds,), default=None
            Indices to differentiate positions in a recommendation interface where the actions are presented.
            If None, the effect of position on the reward will be ignored.
            (If only a single action is chosen for each data, you can just ignore this argument.)

        estimated_pscore: array-like, shape (n_rounds,), default=None
            Estimated behavior policy (propensity scores), i.e., :math:`\\hat{\\pi}_k(a_i|x_i)`.
            If `self.use_estimated_pscore` is True, `estimated_pscore` must be given.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in bootstrap sampling.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        if self.use_estimated_pscore:
            check_array(array=estimated_pscore, name="estimated_pscore", expected_dim=1)
            pscore_ = estimated_pscore
        else:
            check_array(array=pscore, name="pscore", expected_dim=1)
            pscore_ = pscore
        check_multi_loggers_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore_,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        estimated_round_rewards = self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore_,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        # bootstrap the empirical mean of the sample-wise rewards
        return estimate_confidence_interval_by_bootstrap(
            samples=estimated_round_rewards,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class MultiLoggersBalancedDoublyRobust(BaseMultiLoggersOffPolicyEstimator):
    """Multi-Loggers Balanced DoublyRobust (Multi-Bal-DR) Estimator.

    Note
    -------
    This estimator is called DR in Kallus et al.(2021).

    Multi-Bal-DR estimates the policy value of evaluation policy :math:`\\pi_e`
    using logged data collected by multiple logging/behavior policies as

    .. math::

        \\hat{V}_{\\mathrm{Multi-Bal-DR}} (\\pi_e; \\mathcal{D}, \\hat{q})
        := \\mathbb{E}_{n} [ \\hat{q}(x_i,\\pi_e) + w_{avg}(x_i,a_i) (r_i - \\hat{q}(x_i,a_i))],

    where :math:`\\mathcal{D}_k=\\{(x_i,a_i,r_i)\\}_{i=1}^{n_k}` is logged bandit data with :math:`n_k` observations collected by
    the k-th behavior policy :math:`\\pi_k`.
    :math:`w_{avg}(x,a):=\\pi_e (a|x)/\\pi_{avg} (a|x)` is the importance weight given :math:`x` and :math:`a` computed for the *average* behavior policy, which is defined as :math:`\\pi_{avg}(a|x) := \\sum_{k=1}^K \\rho_k \\pi_k(a|x)`.
    We can represent the whole logged bandit data as :math:`\\mathcal{D}=\\{(k_i,x_i,a_i,r_i)\\}_{i=1}^{n}` where :math:`k_i` is the index to indicate the logging/behavior policy that generates i-th data, i.e., :math:`\\pi_{k_i}`.
    Note that :math:`n := \\sum_{k=1}^K n_k` is the total number of logged bandit data, and :math:`\\rho_k := n_k / n` is the dataset proportions.
    :math:`\\hat{q} (x,a)` is the estimated expected reward given :math:`x` and :math:`a`.
    :math:`\\hat{q} (x_i,\\pi):= \\mathbb{E}_{a \\sim \\pi(a|x)}[\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\pi`.
    :math:`\\mathbb{E}_{n}[\\cdot]` is the empirical average over :math:`n` observations in :math:`\\mathcal{D}`.
    When the clipping is applied, a large importance weight is clipped as :math:`\\hat{w}_{avg}(x,a) := \\min \\{ \\lambda, w_{avg}(x,a) \\}`, where :math:`\\lambda (>0)` is a hyperparameter to specify a maximum allowed importance weight.

    Multi-Bal-DR applies the standard DR based on the averaged logging/behavior policy :math:`\\pi_{avg}`.

    Parameters
    ------------
    lambda_: float, default=np.inf
        A maximum possible value of the importance weight.
        When a positive finite value is given, importance weights larger than `lambda_` will be clipped.

    use_estimated_pscore: bool, default=False.
        If True, `estimated_pscore_avg` is used, otherwise, `pscore_avg` (the true propensity scores) is used.

    estimator_name: str, default='multi_bal_dr'.
        Name of the estimator.

    References
    ------------
    Aman Agarwal, Soumya Basu, Tobias Schnabel, and Thorsten Joachims.
    "Effective Evaluation using Logged Bandit Feedback from Multiple Loggers.", 2018.

    Nathan Kallus, Yuta Saito, and Masatoshi Uehara.
    "Optimal Off-Policy Evaluation from Multiple Logging Policies.", 2021.

    """

    # importance-weight clip threshold (np.inf means no clipping)
    lambda_: float = np.inf
    # whether to use `estimated_pscore_avg` instead of the true `pscore_avg`
    use_estimated_pscore: bool = False
    estimator_name: str = "multi_bal_dr"

    def __post_init__(self) -> None:
        """Initialize Class."""
        check_scalar(
            self.lambda_,
            name="lambda_",
            target_type=(int, float),
            min_val=0.0,
        )
        # nan is the only float value that is not equal to itself
        if self.lambda_ != self.lambda_:
            raise ValueError("`lambda_` must not be nan")
        if not isinstance(self.use_estimated_pscore, bool):
            raise TypeError(
                f"`use_estimated_pscore` must be a bool, but {type(self.use_estimated_pscore)} is given"
            )

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore_avg: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

        pscore_avg: array-like, shape (n_rounds,)
            Action choice probabilities of the average logging/behavior policy, i.e., :math:`\\pi_{avg}(a_i|x_i)`.
            If `use_estimated_pscore` is False, `pscore_avg` must be given.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

        position: array-like, shape (n_rounds,), default=None
            Indices to differentiate positions in a recommendation interface where the actions are presented.
            If None, the effect of position on the reward will be ignored.
            (If only a single action is chosen for each data, you can just ignore this argument.)

        Returns
        ----------
        estimated_rewards: array-like, shape (n_rounds,)
            Estimated rewards for each observation.

        """
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        # importance weight against the average behavior policy
        iw_avg = action_dist[np.arange(action.shape[0]), action, position] / pscore_avg
        # weight clipping
        if isinstance(iw_avg, np.ndarray):
            iw_avg = np.minimum(iw_avg, self.lambda_)
        n = action.shape[0]
        q_hat_at_position = estimated_rewards_by_reg_model[np.arange(n), :, position]
        q_hat_factual = estimated_rewards_by_reg_model[np.arange(n), action, position]
        pi_e_at_position = action_dist[np.arange(n), :, position]
        # model-based baseline: q_hat(x_i, pi_e) = E_{a ~ pi_e}[q_hat(x_i, a)]
        estimated_rewards = np.average(
            q_hat_at_position,
            weights=pi_e_at_position,
            axis=1,
        )
        # importance-weighted correction term of DR
        estimated_rewards += iw_avg * (reward - q_hat_factual)
        return estimated_rewards

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        pscore_avg: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        estimated_pscore_avg: Optional[np.ndarray] = None,
        **kwargs,
    ) -> float:
        """Estimate the policy value of evaluation policy.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

        pscore_avg: array-like, shape (n_rounds,), default=None
            Action choice probabilities of the average logging/behavior policy (propensity scores), i.e., :math:`\\pi_{avg}(a_i|x_i)`.
            If `use_estimated_pscore` is False, `pscore_avg` must be given.

        position: array-like, shape (n_rounds,), default=None
            Indices to differentiate positions in a recommendation interface where the actions are presented.
            If None, the effect of position on the reward will be ignored.
            (If only a single action is chosen for each data, you can just ignore this argument.)

        estimated_pscore_avg: array-like, shape (n_rounds,), default=None
            Estimated average logging/behavior policy, i.e., :math:`\\hat{\\pi}_{avg}(a_i|x_i)`.
            If `self.use_estimated_pscore` is True, `estimated_pscore_avg` must be given.

        Returns
        ----------
        V_hat: float
            Estimated policy value of evaluation policy.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        if self.use_estimated_pscore:
            check_array(
                array=estimated_pscore_avg, name="estimated_pscore_avg", expected_dim=1
            )
            pscore_ = estimated_pscore_avg
        else:
            check_array(array=pscore_avg, name="pscore_avg", expected_dim=1)
            pscore_ = pscore_avg
        check_multi_loggers_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore_,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        # policy value = empirical mean of the sample-wise DR rewards (scalar)
        return self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore_avg=pscore_,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        ).mean()

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        pscore_avg: Optional[np.ndarray] = None,
        position: Optional[np.ndarray] = None,
        estimated_pscore_avg: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate the confidence interval of the policy value using bootstrap.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

        action: array-like, shape (n_rounds,)
            Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

        pscore_avg: array-like, shape (n_rounds,), default=None
            Action choice probabilities of the average logging/behavior policy (propensity scores), i.e., :math:`\\pi_{avg}(a_i|x_i)`.
            If `use_estimated_pscore` is False, `pscore_avg` must be given.

        position: array-like, shape (n_rounds,), default=None
            Indices to differentiate positions in a recommendation interface where the actions are presented.
            If None, the effect of position on the reward will be ignored.
            (If only a single action is chosen for each data, you can just ignore this argument.)

        estimated_pscore_avg: array-like, shape (n_rounds,), default=None
            Estimated average logging/behavior policy, i.e., :math:`\\hat{\\pi}_{avg}(a_i|x_i)`.
            If `self.use_estimated_pscore` is True, `estimated_pscore_avg` must be given.

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in bootstrap sampling.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        if self.use_estimated_pscore:
            check_array(
                array=estimated_pscore_avg, name="estimated_pscore_avg", expected_dim=1
            )
            pscore_ = estimated_pscore_avg
        else:
            check_array(array=pscore_avg, name="pscore_avg", expected_dim=1)
            pscore_ = pscore_avg
        check_multi_loggers_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore_,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        # BUG FIX: `_estimate_round_rewards` takes `pscore_avg`, not `pscore`;
        # passing `pscore=pscore_` was swallowed by **kwargs and raised a
        # TypeError for the missing required `pscore_avg` argument.
        estimated_round_rewards = self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore_avg=pscore_,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            action_dist=action_dist,
        )
        # bootstrap the empirical mean of the sample-wise rewards
        return estimate_confidence_interval_by_bootstrap(
            samples=estimated_round_rewards,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class MultiLoggersWeightedDoublyRobust(MultiLoggersNaiveDoublyRobust):
"""Multi-Loggers Weighted Doubly Robust (Multi-Weighted-DR) Estimator.
Note
-------
This estimator is called Precision Weighted DR in Kallus et al.(2021).
Multi-Weighted-DR estimates the policy value of evaluation policy :math:`\\pi_e`
using logged data collected by multiple logging/behavior policies as
.. math::
\\hat{V}_{\\mathrm{Multi-Weighted-DR}} (\\pi_e; \\mathcal{D}, \\hat{q})
:= \\sum_{k=1}^K \\M^{*}_k \\mathbb{E}_{n_k} [\\hat{q}(x_i,\\pi_e) + w_k(x_i,a_i) (r_i - \\hat{q}(x_i,a_i))],
where :math:`\\mathcal{D}_k=\\{(x_i,a_i,r_i)\\}_{i=1}^{n_k}` is logged bandit data with :math:`n_k` observations collected by
the k-th behavior policy :math:`\\pi_k`. :math:`w_k(x,a):=\\pi_e (a|x)/\\pi_k (a|x)` is the importance weight given :math:`x` and :math:`a` computed for the k-th behavior policy.
We can represent the whole logged bandit data as :math:`\\mathcal{D}=\\{(k_i,x_i,a_i,r_i)\\}_{i=1}^{n}` where :math:`k_i` is the index to indicate the logging/behavior policy that generates i-th data, i.e., :math:`\\pi_{k_i}`.
Note that :math:`n := \\sum_{k=1}^K` is the total number of logged bandit data, and :math:`\\rho_k := n_k / n` is the dataset proportions.
:math:`\\mathbb{E}_{n}[\\cdot]` is the empirical average over :math:`n` observations in :math:`\\mathcal{D}`.
:math:`\\hat{q} (x,a)` is the estimated expected reward given :math:`x` and :math:`a`.
:math:`\\hat{q} (x_i,\\pi):= \\mathbb{E}_{a \\sim \\pi(a|x)}[\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\pi`.
When the clipping is applied, a large importance weight is clipped as :math:`\\hat{w}_k(x,a) := \\min \\{ \\lambda, w_k(x,a) \\}`, where :math:`\\lambda (>0)` is a hyperparameter to specify a maximum allowed importance weight.
Multi-Weighted-DR prioritizes the strata generated by the logging/behavior policies similar to the evaluation policy.
The weight for the k-th logging/behavior policy :math:`\\M^*_k` is defined based on
the divergence between the evaluation policy :math:`\\pi_e` and :math:`\\pi_k`.
Parameters
------------
lambda_: float, default=np.inf
A maximum possible value of the importance weight.
When a positive finite value is given, importance weights larger than `lambda_` will be clipped.
use_estimated_pscore: bool, default=False.
If True, `estimated_pscore` is used, otherwise, `pscore` (the true propensity scores) is used.
estimator_name: str, default='multi_weighted_dr'.
Name of the estimator.
References
------------
Aman Agarwal, Soumya Basu, Tobias Schnabel, and Thorsten Joachims.
"Effective Evaluation using Logged Bandit Feedback from Multiple Loggers.", 2018.
Nathan Kallus, Yuta Saito, and Masatoshi Uehara.
"Optimal Off-Policy Evaluation from Multiple Logging Policies.", 2021.
"""
estimator_name: str = "multi_weighted_dr"
def _estimate_round_rewards(
    self,
    reward: np.ndarray,
    action: np.ndarray,
    pscore: np.ndarray,
    stratum_idx: np.ndarray,
    action_dist: np.ndarray,
    estimated_rewards_by_reg_model: np.ndarray,
    position: Optional[np.ndarray] = None,
    **kwargs,
) -> np.ndarray:
    """Estimate round-wise (or sample-wise) rewards.

    Computes, for each observation, the direct-method baseline
    :math:`\\hat{q}(x_i, \\pi_e)` plus a variance-weighted doubly robust
    correction term, where each logging/behavior policy (stratum) is
    weighted inversely to the empirical variance of its DR scores.

    Parameters
    ----------
    reward: array-like, shape (n_rounds,)
        Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

    action: array-like, shape (n_rounds,)
        Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

    pscore: array-like, shape (n_rounds,)
        Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_k(a_i|x_i)`.

    stratum_idx: array-like, shape (n_rounds,)
        Indices to differentiate the logging/behavior policy that generates each data, i.e., :math:`k`.
        NOTE(review): ``var_k`` below is indexed with the raw stratum values, so the
        indices are assumed to be contiguous integers ``0..K-1`` — confirm with callers.

    action_dist: array-like, shape (n_rounds, n_actions, len_list)
        Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

    estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
        Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

    position: array-like, shape (n_rounds,), default=None
        Indices to differentiate positions in a recommendation interface where the actions are presented.
        If None, every observation is treated as position 0.

    Returns
    ----------
    estimated_rewards: array-like, shape (n_rounds,)
        Estimated rewards for each observation.
    """
    # No position information: treat every observation as slot 0.
    if position is None:
        position = np.zeros(action_dist.shape[0], dtype=int)
    # Importance weight of the factual action: pi_e(a_i|x_i) / pi_k(a_i|x_i).
    iw = action_dist[np.arange(action.shape[0]), action, position] / pscore
    # weight clipping: cap at self.lambda_ (np.inf by default, i.e., no-op)
    if isinstance(iw, np.ndarray):
        iw = np.minimum(iw, self.lambda_)
    n = action.shape[0]
    # q_hat(x_i, a) for all actions, and for the factual action a_i only.
    q_hat_at_position = estimated_rewards_by_reg_model[np.arange(n), :, position]
    q_hat_factual = estimated_rewards_by_reg_model[np.arange(n), action, position]
    pi_e_at_position = action_dist[np.arange(n), :, position]
    # Direct-method baseline: E_{a ~ pi_e}[q_hat(x_i, a)].
    estimated_rewards = np.average(
        q_hat_at_position,
        weights=pi_e_at_position,
        axis=1,
    )
    # Empirical variance of the per-sample DR score within each stratum.
    unique_stratum_idx, n_data_strata = np.unique(stratum_idx, return_counts=True)
    var_k = np.zeros(unique_stratum_idx.shape[0])
    for k in unique_stratum_idx:
        idx_ = stratum_idx == k
        var_k[k] = np.var(
            estimated_rewards[idx_]
            + iw[idx_] * (reward[idx_] - q_hat_factual[idx_])
        )
    # Inverse-variance stratum weights, normalized over dataset proportions.
    # NOTE(review): a zero-variance stratum makes this divide by zero — confirm
    # upstream guards (e.g., each stratum containing >1 distinct score).
    weight_k = n / (var_k * np.sum(n_data_strata / var_k))
    # DR correction term, scaled by the weight of each observation's stratum.
    estimated_rewards += iw * (reward - q_hat_factual) * weight_k[stratum_idx]
    return estimated_rewards
def estimate_policy_value(
    self,
    reward: np.ndarray,
    action: np.ndarray,
    action_dist: np.ndarray,
    stratum_idx: np.ndarray,
    estimated_rewards_by_reg_model: np.ndarray,
    pscore: Optional[np.ndarray] = None,
    position: Optional[np.ndarray] = None,
    estimated_pscore: Optional[np.ndarray] = None,
    **kwargs,
) -> float:
    """Estimate the policy value of evaluation policy.

    Validates the inputs, then averages the per-sample doubly robust
    scores produced by :meth:`_estimate_round_rewards`.

    Parameters
    ----------
    reward: array-like, shape (n_rounds,)
        Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

    action: array-like, shape (n_rounds,)
        Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

    action_dist: array-like, shape (n_rounds, n_actions, len_list)
        Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

    stratum_idx: array-like, shape (n_rounds,)
        Indices to differentiate the logging/behavior policy that generates each data, i.e., :math:`k`.

    estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
        Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

    pscore: array-like, shape (n_rounds,), default=None
        Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_k(a_i|x_i)`.
        If `use_estimated_pscore` is False, `pscore` must be given.

    position: array-like, shape (n_rounds,), default=None
        Indices to differentiate positions in a recommendation interface where the actions are presented.
        If None, the effect of position on the reward will be ignored.

    estimated_pscore: array-like, shape (n_rounds,), default=None
        Estimated behavior policy (propensity scores), i.e., :math:`\\hat{\\pi}_k(a_i|x_i)`.
        If `self.use_estimated_pscore` is True, `estimated_pscore` must be given.

    Returns
    ----------
    V_hat: float
        Estimated policy value of evaluation policy.
    """
    # Validate all array inputs before any computation.
    check_array(
        array=estimated_rewards_by_reg_model,
        name="estimated_rewards_by_reg_model",
        expected_dim=3,
    )
    check_array(array=reward, name="reward", expected_dim=1)
    check_array(array=action, name="action", expected_dim=1)
    check_array(array=stratum_idx, name="stratum_idx", expected_dim=1)
    # Use either the true or the estimated propensity scores, validating
    # whichever one is actually consumed.
    if self.use_estimated_pscore:
        check_array(array=estimated_pscore, name="estimated_pscore", expected_dim=1)
        pscore_ = estimated_pscore
    else:
        check_array(array=pscore, name="pscore", expected_dim=1)
        pscore_ = pscore
    check_multi_loggers_ope_inputs(
        action_dist=action_dist,
        position=position,
        action=action,
        reward=reward,
        stratum_idx=stratum_idx,
        pscore=pscore_,
        estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
    )
    if position is None:
        position = np.zeros(action_dist.shape[0], dtype=int)
    # Policy value is the mean of the per-sample DR scores.
    # (Return annotation fixed: this is a scalar, not an ndarray.)
    return self._estimate_round_rewards(
        reward=reward,
        action=action,
        position=position,
        pscore=pscore_,
        stratum_idx=stratum_idx,
        action_dist=action_dist,
        estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
    ).mean()
def estimate_interval(
    self,
    reward: np.ndarray,
    action: np.ndarray,
    stratum_idx: np.ndarray,
    action_dist: np.ndarray,
    estimated_rewards_by_reg_model: np.ndarray,
    pscore: Optional[np.ndarray] = None,
    position: Optional[np.ndarray] = None,
    estimated_pscore: Optional[np.ndarray] = None,
    alpha: float = 0.05,
    n_bootstrap_samples: int = 10000,
    random_state: Optional[int] = None,
    **kwargs,
) -> Dict[str, float]:
    """Estimate the confidence interval of the policy value using bootstrap.

    Parameters
    ----------
    reward: array-like, shape (n_rounds,)
        Rewards observed for each data in logged bandit data, i.e., :math:`r_i`.

    action: array-like, shape (n_rounds,)
        Actions sampled by the logging/behavior policy for each data in logged bandit data, i.e., :math:`a_i`.

    stratum_idx: array-like, shape (n_rounds,)
        Indices to differentiate the logging/behavior policy that generates each data, i.e., :math:`k_i`.

    action_dist: array-like, shape (n_rounds, n_actions, len_list)
        Action choice probabilities of the evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_i|x_i)`.

    estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
        Estimated expected rewards given context, action, and position, i.e., :math:`\\hat{q}(x_i,a_i)`.

    pscore: array-like, shape (n_rounds,), default=None
        Action choice probabilities of the logging/behavior policy (propensity scores), i.e., :math:`\\pi_k(a_i|x_i)`.
        If `use_estimated_pscore` is False, `pscore` must be given.

    position: array-like, shape (n_rounds,), default=None
        Indices to differentiate positions in a recommendation interface where the actions are presented.
        If None, the effect of position on the reward will be ignored.

    estimated_pscore: array-like, shape (n_rounds,), default=None
        Estimated behavior policy (propensity scores), i.e., :math:`\\hat{\\pi}_b(a_i|x_i)`.
        If `self.use_estimated_pscore` is True, `estimated_pscore` must be given.

    alpha: float, default=0.05
        Significance level.

    n_bootstrap_samples: int, default=10000
        Number of resampling performed in bootstrap sampling.

    random_state: int, default=None
        Controls the random seed in bootstrap sampling.

    Returns
    ----------
    estimated_confidence_interval: Dict[str, float]
        Dictionary storing the estimated mean and upper-lower confidence bounds.
    """
    # Input validation (same order as estimate_policy_value).
    check_array(
        array=estimated_rewards_by_reg_model,
        name="estimated_rewards_by_reg_model",
        expected_dim=3,
    )
    check_array(array=reward, name="reward", expected_dim=1)
    check_array(array=action, name="action", expected_dim=1)
    check_array(array=stratum_idx, name="stratum_idx", expected_dim=1)
    # Pick true vs. estimated propensity scores, validating the chosen one.
    if self.use_estimated_pscore:
        check_array(array=estimated_pscore, name="estimated_pscore", expected_dim=1)
        used_pscore = estimated_pscore
    else:
        check_array(array=pscore, name="pscore", expected_dim=1)
        used_pscore = pscore
    check_multi_loggers_ope_inputs(
        action_dist=action_dist,
        position=position,
        action=action,
        reward=reward,
        stratum_idx=stratum_idx,
        pscore=used_pscore,
        estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
    )
    if position is None:
        position = np.zeros(action_dist.shape[0], dtype=int)
    # Bootstrap over the per-sample DR scores.
    sample_rewards = self._estimate_round_rewards(
        reward=reward,
        action=action,
        position=position,
        stratum_idx=stratum_idx,
        pscore=used_pscore,
        action_dist=action_dist,
        estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
    )
    return estimate_confidence_interval_by_bootstrap(
        samples=sample_rewards,
        alpha=alpha,
        n_bootstrap_samples=n_bootstrap_samples,
        random_state=random_state,
    )
| 43.279469
| 238
| 0.636498
| 10,274
| 78,206
| 4.657582
| 0.031341
| 0.044512
| 0.035986
| 0.038556
| 0.966835
| 0.959354
| 0.958978
| 0.948299
| 0.947902
| 0.945917
| 0
| 0.005294
| 0.256029
| 78,206
| 1,806
| 239
| 43.303433
| 0.817146
| 0.551978
| 0
| 0.890746
| 0
| 0
| 0.040236
| 0.013312
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032134
| false
| 0
| 0.012853
| 0
| 0.095116
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f57fcb5ea6bed2886b976c17772a903819ff1596
| 6,338
|
py
|
Python
|
main.py
|
Zeebra38/bot_tlgr_shedule
|
54566efc0583743e21256304dc4320f5ea1e3255
|
[
"MIT"
] | null | null | null |
main.py
|
Zeebra38/bot_tlgr_shedule
|
54566efc0583743e21256304dc4320f5ea1e3255
|
[
"MIT"
] | null | null | null |
main.py
|
Zeebra38/bot_tlgr_shedule
|
54566efc0583743e21256304dc4320f5ea1e3255
|
[
"MIT"
] | null | null | null |
from openpyxl import load_workbook
from datetime import date, timedelta, datetime, time
from date import weeknum, getweekday
from util import zapoln
from objects import Obj, Day
import os
def update():
    """Regenerate the per-group schedule files (Baso-01.txt .. Baso-06.txt).

    Loads the timetable spreadsheet and writes one text file per group
    via ``zapoln``. The workbook is loaded *before* any output file is
    opened, so a failed load no longer truncates the existing files
    (the original opened all six files first).
    """
    wb = load_workbook('./Raspisanie.xlsx')
    # NOTE(review): get_sheet_by_name is deprecated in newer openpyxl
    # (use wb['Лист1']); kept for compatibility with the pinned version.
    sheet = wb.get_sheet_by_name('Лист1')
    for group in range(1, 7):
        # `with` guarantees each file is flushed and closed.
        with open('Baso-0{}.txt'.format(group), 'w',
                  encoding='utf8', errors='ignore') as out:
            zapoln(out, sheet, group)
def todayr(message, bot=None, group=1, dayweek=-1):
    """Build the current-week schedule of `group` for one weekday.

    If `bot` is given, the schedule is sent to `message.chat.id` and
    None is returned; otherwise the formatted schedule string is
    returned. `dayweek` overrides today's weekday (0=Mon .. 6=Sun);
    -1 means "use today". Sunday always yields the "no classes" text.

    Fixes vs. the original: the schedule file is opened in a `with`
    block (it previously leaked on the Sunday path and on non-bot
    returns), and the six duplicated weekday branches are replaced by
    a header lookup table. Dead `pos` bookkeeping removed.
    """
    day = datetime.today().weekday() if dayweek == -1 else dayweek
    if day == 6:
        # Sunday: no classes scheduled.
        if bot is not None:
            bot.send_message(message.chat.id, 'Сегодня воскресенье, пар нет')
            return
        return 'Сегодня воскресенье, пар нет'
    headers = ('Понедельник:\n', 'Вторник:\n', 'Среда:\n',
               'Четверг:\n', 'Пятница:\n', 'Суббота:\n')
    b = Day()
    with open('Baso-0{}.txt'.format(group), 'r',
              encoding='utf8', errors='ignore') as f:
        # Skip forward to the header line of the requested weekday.
        for line in f:
            if line == headers[day]:
                break
        # The following 12 lines describe that day's lessons.
        for count, line in enumerate(f):
            if count == 12:
                break
            b.objs.append(Obj(line, weeknum()))
    if bot is not None:
        bot.send_message(message.chat.id,
                         'Группа - 0{} '.format(group) + b.show(weeknum(), day))
        return
    buf = b.show(weeknum(), day)
    if buf == 'Сегодня пар нет':
        return ('Группа - 0{} '.format(group) + getweekday(day) + ' '
                + str(weeknum()) + ' неделя\n' + buf + 2 * '\n')
    return 'Группа - 0{} '.format(group) + buf + '\n'
def nextweektoday(message, bot=None, group=1, dayweek=-1):
    """Build the *next*-week schedule of `group` for one weekday.

    Identical to :func:`todayr` except that the week number is
    ``weeknum() + 1``. If `bot` is given, sends to `message.chat.id`
    and returns None; otherwise returns the schedule string.
    `dayweek` overrides today's weekday (0=Mon .. 6=Sun; -1 = today).

    Fixes vs. the original: file handle managed with `with` (was
    leaked on Sunday/non-bot paths); duplicated weekday branches
    replaced with a lookup table; dead `pos` bookkeeping removed.
    """
    day = datetime.today().weekday() if dayweek == -1 else dayweek
    if day == 6:
        # Sunday: no classes scheduled.
        if bot is not None:
            bot.send_message(message.chat.id, 'Сегодня воскресенье, пар нет')
            return
        return 'Сегодня воскресенье, пар нет'
    headers = ('Понедельник:\n', 'Вторник:\n', 'Среда:\n',
               'Четверг:\n', 'Пятница:\n', 'Суббота:\n')
    next_week = weeknum() + 1
    b = Day()
    with open('Baso-0{}.txt'.format(group), 'r',
              encoding='utf8', errors='ignore') as f:
        # Skip forward to the header line of the requested weekday.
        for line in f:
            if line == headers[day]:
                break
        # The following 12 lines describe that day's lessons.
        for count, line in enumerate(f):
            if count == 12:
                break
            b.objs.append(Obj(line, next_week))
    if bot is not None:
        bot.send_message(message.chat.id,
                         'Группа - 0{} '.format(group) + b.show(next_week, day))
        return
    buf = b.show(next_week, day)
    if buf == 'Сегодня пар нет':
        return ('Группа - 0{} '.format(group) + getweekday(day) + ' '
                + str(next_week) + ' неделя\n' + buf + 2 * '\n')
    return 'Группа - 0{} '.format(group) + buf + '\n'
def nextd(message, bot, group):
    """Send tomorrow's schedule of `group` to the chat of `message`.

    Tomorrow's weekday index wraps Saturday -> Sunday -> Monday; when
    tomorrow is Monday the next week number (``weeknum() + 1``) is
    used. When tomorrow is Sunday, a "no classes" notice is sent.

    Fixes vs. the original: the schedule file is opened in a `with`
    block (it was previously leaked on the Sunday path), duplicated
    weekday branches replaced with a lookup table, and the week number
    is computed once instead of per appended line. Dead `pos`
    bookkeeping removed.
    """
    tomorrow = datetime.today().weekday() + 1
    if tomorrow == 7:
        tomorrow = 0
    if tomorrow == 6:
        # Tomorrow is Sunday: no classes.
        bot.send_message(message.chat.id, 'Завтра воскресенье, пар нет')
        return
    # Monday belongs to the following week.
    week = weeknum() + 1 if tomorrow == 0 else weeknum()
    headers = ('Понедельник:\n', 'Вторник:\n', 'Среда:\n',
               'Четверг:\n', 'Пятница:\n', 'Суббота:\n')
    b = Day()
    with open('Baso-0{}.txt'.format(group), 'r',
              encoding='utf8', errors='ignore') as f:
        # Skip forward to tomorrow's header line.
        for line in f:
            if line == headers[tomorrow]:
                break
        # The following 12 lines describe that day's lessons.
        for count, line in enumerate(f):
            if count == 12:
                break
            b.objs.append(Obj(line, week))
    bot.send_message(message.chat.id,
                     'Группа - 0{} '.format(group) + b.show(week, tomorrow))
| 28.940639
| 122
| 0.476807
| 807
| 6,338
| 3.729864
| 0.135068
| 0.048837
| 0.062791
| 0.069767
| 0.817276
| 0.816944
| 0.752159
| 0.734219
| 0.734219
| 0.734219
| 0
| 0.027609
| 0.377091
| 6,338
| 218
| 123
| 29.073395
| 0.734802
| 0.005364
| 0
| 0.822115
| 0
| 0
| 0.112663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.028846
| 0
| 0.091346
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
196664e7372baa5f6d1f6cc16f761070b6275066
| 97
|
py
|
Python
|
sdmx/tests/format/test_format_json.py
|
khaeru/sdmx
|
d871e045f5bc163b83750b32bf22b5ac4cdebfc0
|
[
"Apache-2.0"
] | 4
|
2020-07-21T16:03:30.000Z
|
2022-01-12T12:10:05.000Z
|
sdmx/tests/format/test_format_json.py
|
khaeru/sdmx
|
d871e045f5bc163b83750b32bf22b5ac4cdebfc0
|
[
"Apache-2.0"
] | 93
|
2020-05-01T10:45:13.000Z
|
2022-02-15T17:10:11.000Z
|
sdmx/tests/format/test_format_json.py
|
khaeru/sdmx
|
d871e045f5bc163b83750b32bf22b5ac4cdebfc0
|
[
"Apache-2.0"
] | 8
|
2020-11-10T17:11:01.000Z
|
2022-01-19T13:35:32.000Z
|
from sdmx.format import json
def test_content_types():
    """The SDMX-JSON format module registers exactly five content types."""
    assert len(json.CONTENT_TYPES) == 5
| 16.166667
| 39
| 0.742268
| 15
| 97
| 4.6
| 0.8
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0.164948
| 97
| 5
| 40
| 19.4
| 0.839506
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
196be485fcb2c509d86c396cef4193f0261697b4
| 4,838
|
py
|
Python
|
tests/test_cmd_modify.py
|
a1eko/treem
|
41039b0734bfe84fe637783842849038630ecb7f
|
[
"MIT"
] | 1
|
2020-10-06T13:09:02.000Z
|
2020-10-06T13:09:02.000Z
|
tests/test_cmd_modify.py
|
a1eko/treem
|
41039b0734bfe84fe637783842849038630ecb7f
|
[
"MIT"
] | null | null | null |
tests/test_cmd_modify.py
|
a1eko/treem
|
41039b0734bfe84fe637783842849038630ecb7f
|
[
"MIT"
] | 1
|
2021-09-22T14:17:22.000Z
|
2021-09-22T14:17:22.000Z
|
"""Testing CLI command modify."""
import subprocess
import os
def _run_modify(*args):
    """Run ``swc modify`` from the test data directory.

    Returns (returncode, stdout, stderr) of the finished process.
    """
    os.chdir(os.path.dirname(__file__) + '/data')
    proc = subprocess.Popen(['swc', 'modify', *args],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout, stderr


def _assert_clean(result):
    """Assert the command exited 0 and produced no output on either stream."""
    returncode, stdout, stderr = result
    assert returncode == 0
    assert stdout == ''
    assert stderr == ''


def test_scale():
    """Tests for scaling of dimensions."""
    _assert_clean(_run_modify('pass_simple_branch.swc',
                              '-s', '1', '1', '1', '-e', '2',
                              '-o', '/tmp/test_treem.swc'))


def test_scale_radius():
    """Tests for scaling of radii."""
    _assert_clean(_run_modify('pass_simple_branch.swc',
                              '-r', '1', '-b', '1',
                              '-o', '/tmp/test_treem.swc'))


def test_unfold():
    """Tests for stretching and smoothing."""
    _assert_clean(_run_modify('pass_zjump.swc',
                              '-t', '1', '-m', '1',
                              '-o', '/tmp/test_treem.swc'))


def test_jitter():
    """Tests for node jittering."""
    _assert_clean(_run_modify('pass_simple_branch.swc',
                              '-i', '2', '4', '8', '-j', '0.3',
                              '--seed', '1',
                              '-o', '/tmp/test_treem.swc'))


def test_jitter_sec():
    """Tests for section jittering."""
    _assert_clean(_run_modify('pass_simple_branch.swc',
                              '-i', '2', '4', '8', '-j', '0.3',
                              '--seed', '1', '--sec',
                              '-o', '/tmp/test_treem.swc'))


def test_twist():
    """Tests for branch twisting."""
    _assert_clean(_run_modify('pass_simple_branch.swc',
                              '-i', '3', '9', '-w', '360',
                              '--seed', '1',
                              '-o', '/tmp/test_treem.swc'))


def test_swap():
    """Tests for branch swapping."""
    _assert_clean(_run_modify('pass_simple_branch.swc',
                              '-i', '4', '8', '-a',
                              '--seed', '1',
                              '-o', '/tmp/test_treem.swc'))


def test_prune():
    """Tests for branch pruning."""
    _assert_clean(_run_modify('pass_simple_branch.swc',
                              '-i', '4', '8', '-u',
                              '-o', '/tmp/test_treem.swc'))
| 37.503876
| 71
| 0.468582
| 454
| 4,838
| 4.832599
| 0.167401
| 0.102097
| 0.032817
| 0.047402
| 0.865542
| 0.865542
| 0.865542
| 0.865542
| 0.865542
| 0.865542
| 0
| 0.012974
| 0.378669
| 4,838
| 128
| 72
| 37.796875
| 0.7169
| 0.053535
| 0
| 0.823529
| 0
| 0
| 0.119568
| 0.033973
| 0
| 0
| 0
| 0
| 0.235294
| 1
| 0.078431
| false
| 0.078431
| 0.019608
| 0
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
196d529c6ec8de61b82f2be6103726338da99594
| 27
|
py
|
Python
|
src/NicePrinter/__init__.py
|
24aitor/NicePrinter
|
01cef3add94534ed8ac597b4a0322aa433ec72aa
|
[
"MIT"
] | 4
|
2017-07-11T21:03:25.000Z
|
2018-09-11T09:51:26.000Z
|
src/NicePrinter/__init__.py
|
24aitor/NicePrinter
|
01cef3add94534ed8ac597b4a0322aa433ec72aa
|
[
"MIT"
] | 1
|
2017-05-31T23:46:45.000Z
|
2017-06-01T02:02:07.000Z
|
src/NicePrinter/__init__.py
|
24aitor/NicePrinter
|
01cef3add94534ed8ac597b4a0322aa433ec72aa
|
[
"MIT"
] | null | null | null |
from .niceprinter import *
| 13.5
| 26
| 0.777778
| 3
| 27
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
19788ae7459c830dbc02624c9ad177f3f2b78cba
| 5,329
|
py
|
Python
|
tests/test_cli.py
|
balazsdukai/cjio_dbexport
|
6f331be389b09364aee1ad32ded8c0882a0f2b5d
|
[
"MIT"
] | 3
|
2020-03-19T11:05:00.000Z
|
2021-11-10T14:50:00.000Z
|
tests/test_cli.py
|
balazsdukai/cjio_dbexport
|
6f331be389b09364aee1ad32ded8c0882a0f2b5d
|
[
"MIT"
] | 28
|
2020-01-02T12:46:16.000Z
|
2021-11-08T14:51:16.000Z
|
tests/test_cli.py
|
balazsdukai/cjio_dbexport
|
6f331be389b09364aee1ad32ded8c0882a0f2b5d
|
[
"MIT"
] | 3
|
2020-01-09T19:26:47.000Z
|
2021-09-29T08:10:21.000Z
|
#!/usr/bin/env python
"""Tests for `cjio_dbexport` package."""
import pytest
import logging
log = logging.getLogger(__name__)
from click.testing import CliRunner
from cjio_dbexport import cli
def test_command_line_interface():
    """Both a bare invocation and ``--help`` exit 0 and show the tool banner."""
    runner = CliRunner()
    for extra_args in (None, ['--help']):
        res = runner.invoke(cli.main, extra_args)
        assert res.exit_code == 0
        assert 'Export tool from PostGIS to CityJSON' in res.output
@pytest.mark.db3dnl
class TestDb3DNLIntegration:
    """CLI integration tests against the 3DNL database (marker: db3dnl).

    Each test invokes the CLI through click's CliRunner; the shared
    ``_check`` helper replaces the failure-handling boilerplate that
    was previously copy-pasted into every test.
    """

    @staticmethod
    def _check(export_result, fail_on_logged_errors=False):
        """Fail the test if the CLI invocation errored.

        When `fail_on_logged_errors` is True, also fail if the output
        contains a logged ERROR/CRITICAL/FATAL record.
        """
        if export_result.exit_code != 0:
            log.error(export_result.stderr_bytes)
            log.exception(export_result.exception)
            pytest.fail()
        if fail_on_logged_errors and any(
                True for res in ['ERROR', 'CRITICAL', 'FATAL']
                if res in export_result.output):
            pytest.fail()

    def test_export_tiles(self, data_output_dir, cfg_db3dnl_path_param, capsys):
        """Test the CLI."""
        runner = CliRunner()
        assert runner.invoke(cli.main).exit_code == 0
        export_result = runner.invoke(cli.main, [
            str(cfg_db3dnl_path_param),
            'export_tiles',
            '--jobs', '4',
            'gb1', 'ic3', 'kh7', 'ec4',
            str(data_output_dir)
        ])
        self._check(export_result, fail_on_logged_errors=True)

    def test_export_tiles_merge(self, data_output_dir, cfg_db3dnl_path_param, capsys):
        """Test the CLI."""
        runner = CliRunner()
        assert runner.invoke(cli.main).exit_code == 0
        export_result = runner.invoke(cli.main, [
            str(cfg_db3dnl_path_param),
            'export_tiles',
            '--merge',
            '--jobs', '4',
            'gb1', 'ic3', 'kh7', 'ec4',
            str(data_output_dir)
        ])
        print(export_result.output)
        self._check(export_result, fail_on_logged_errors=True)

    def test_export(self, data_output_dir, cfg_db3dnl_path_param):
        """Test the CLI."""
        runner = CliRunner()
        assert runner.invoke(cli.main).exit_code == 0
        outfile = str(data_output_dir / 'test.json')
        export_result = runner.invoke(cli.main, [
            str(cfg_db3dnl_path_param),
            'export',
            outfile
        ])
        self._check(export_result)

    def test_export_bbox(self, data_output_dir, cfg_db3dnl_path_param):
        """Test the CLI."""
        runner = CliRunner()
        outfile = str(data_output_dir / 'test_bbox.json')
        export_result = runner.invoke(cli.main, [
            str(cfg_db3dnl_path_param),
            'export_bbox',
            '92837.734', '465644.179', '193701.818', '466898.821',
            outfile
        ])
        self._check(export_result)

    def test_export_extent(self, data_output_dir, cfg_db3dnl_path_param, db3dnl_poly_geojson):
        """Test the CLI."""
        runner = CliRunner()
        assert runner.invoke(cli.main).exit_code == 0
        outfile = str(data_output_dir / 'test_poly.json')
        export_result = runner.invoke(cli.main, [
            str(cfg_db3dnl_path_param),
            'export_extent',
            str(db3dnl_poly_geojson),
            outfile
        ])
        self._check(export_result)

    def test_index(self, db3dnl_poly_geojson, cfg_cjdb_path):
        """Test the CLI."""
        runner = CliRunner()
        assert runner.invoke(cli.main).exit_code == 0
        export_result = runner.invoke(cli.main, [
            str(cfg_cjdb_path),
            'index',
            '--drop',
            str(db3dnl_poly_geojson),
            '100', '100',
        ])
        self._check(export_result)
@pytest.mark.db3dnl
class TestLoD2Integration:
    """CLI integration test for exporting one LoD2 tile (marker: db3dnl)."""

    def test_export_one(self, data_output_dir, cfg_lod2_path_param, capsys):
        """Test the CLI."""
        runner = CliRunner()
        assert runner.invoke(cli.main).exit_code == 0
        args = [str(cfg_lod2_path_param), 'export_tiles',
                '--jobs', '1', 'ec4', str(data_output_dir)]
        export_result = runner.invoke(cli.main, args)
        if export_result.exit_code != 0:
            log.error(export_result.stderr_bytes)
            log.exception(export_result.exception)
            pytest.fail()
        for token in ('ERROR', 'CRITICAL', 'FATAL'):
            if token in export_result.output:
                pytest.fail()
| 34.380645
| 94
| 0.586789
| 623
| 5,329
| 4.76565
| 0.154093
| 0.129336
| 0.09094
| 0.106096
| 0.810711
| 0.787471
| 0.778377
| 0.778377
| 0.766588
| 0.766588
| 0
| 0.023618
| 0.300807
| 5,329
| 154
| 95
| 34.603896
| 0.773215
| 0.031338
| 0
| 0.730769
| 0
| 0
| 0.067631
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.061538
| false
| 0
| 0.030769
| 0
| 0.107692
| 0.007692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
271a95d4f4b676259c497be7fe59bdaa2f3fd905
| 103
|
py
|
Python
|
ambra_sdk/service/ws/__init__.py
|
dicomgrid/sdk-python
|
bb12eed311bad73dfb863917df4dc5cbcd91a447
|
[
"Apache-2.0"
] | 9
|
2020-04-20T23:45:44.000Z
|
2021-04-18T11:22:17.000Z
|
ambra_sdk/service/ws/__init__.py
|
dicomgrid/sdk-python
|
bb12eed311bad73dfb863917df4dc5cbcd91a447
|
[
"Apache-2.0"
] | 13
|
2020-02-08T16:15:05.000Z
|
2021-09-13T22:55:28.000Z
|
ambra_sdk/service/ws/__init__.py
|
dicomgrid/sdk-python
|
bb12eed311bad73dfb863917df4dc5cbcd91a447
|
[
"Apache-2.0"
] | 6
|
2020-03-25T17:47:45.000Z
|
2021-04-18T11:22:19.000Z
|
from ambra_sdk.service.ws.async_ws import AsyncWSManager
from ambra_sdk.service.ws.ws import WSManager
| 34.333333
| 56
| 0.864078
| 17
| 103
| 5.058824
| 0.529412
| 0.209302
| 0.27907
| 0.44186
| 0.488372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07767
| 103
| 2
| 57
| 51.5
| 0.905263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
278760e9e674e71aa2c28beb5755e36f7fcee1fe
| 972
|
py
|
Python
|
check/_loaders.py
|
MichaelClerx/cellml-validation
|
72383c76bd0a69a9bb162b10ae2c7cd3d0e08b0c
|
[
"Apache-2.0"
] | 1
|
2019-05-06T22:55:12.000Z
|
2019-05-06T22:55:12.000Z
|
check/_loaders.py
|
MichaelClerx/cellml-validation
|
72383c76bd0a69a9bb162b10ae2c7cd3d0e08b0c
|
[
"Apache-2.0"
] | 65
|
2019-01-18T09:19:12.000Z
|
2022-01-27T16:17:06.000Z
|
check/_loaders.py
|
MichaelClerx/cellml-validation
|
72383c76bd0a69a9bb162b10ae2c7cd3d0e08b0c
|
[
"Apache-2.0"
] | null | null | null |
#
# Methods for loading model and validation files
#
import os
import check
def _join(base_dir, parts):
    """Join *parts* onto *base_dir*; shared by all loader helpers below."""
    return os.path.join(base_dir, *parts)


def cellml_1_0(filename):
    """Return the path to a CellML 1.0 validation file."""
    return _join(check.CELLML_1_0_DIR, (filename,))


def cellml_1_1(filename):
    """Return the path to a CellML 1.1 validation file."""
    return _join(check.CELLML_1_1_DIR, (filename,))


def cellml_2_0(filename):
    """Return the path to a CellML 2.0 validation file."""
    return _join(check.CELLML_2_0_DIR, (filename,))


def model_1_0(*filename):
    """Return the path to a CellML 1.0 file."""
    return _join(check.MODELS_1_0_DIR, filename)


def model_1_1(*filename):
    """Return the path to a CellML 1.1 file."""
    return _join(check.MODELS_1_1_DIR, filename)


def model_2_0(*filename):
    """Return the path to a CellML 2.0 file."""
    return _join(check.MODELS_2_0_DIR, filename)
| 19.44
| 56
| 0.662551
| 158
| 972
| 3.886076
| 0.164557
| 0.091205
| 0.175896
| 0.214984
| 0.838762
| 0.765472
| 0.700326
| 0.700326
| 0.545603
| 0.34202
| 0
| 0.047682
| 0.223251
| 972
| 49
| 57
| 19.836735
| 0.765563
| 0.323045
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0
| 0.142857
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
27fa717f805e536f40751d00b9761778aaf111cb
| 3,598
|
py
|
Python
|
pages/06 SingleChoice/00_single_choice.py
|
sebastiandres/stb_chapter_demo_v070
|
3ca5e53ececc678a39b9c79728f427b3e31746fe
|
[
"MIT"
] | null | null | null |
pages/06 SingleChoice/00_single_choice.py
|
sebastiandres/stb_chapter_demo_v070
|
3ca5e53ececc678a39b9c79728f427b3e31746fe
|
[
"MIT"
] | null | null | null |
pages/06 SingleChoice/00_single_choice.py
|
sebastiandres/stb_chapter_demo_v070
|
3ca5e53ececc678a39b9c79728f427b3e31746fe
|
[
"MIT"
] | null | null | null |
import streamlit as st
import streamlit_book as stb
import time
import random
# Demo page: each section renders the source snippet (left column) next to
# the live widget produced by that exact snippet (right column).
st.title("Single Choice Question")

# Required arguments
st.header("Question with minimal arguments")
c1, c2 = st.columns([5,4])
with c1:
    st.code("""
stb.single_choice("What does pandas (the library) stands for?",
                ["The cutest bear", "Panel Data",
                "Pure Adamantium Numeric Datasets And Stuff", "PArties & DAtaSets"],
                1)
    """)
with c2:
    stb.single_choice("What does pandas (the library) stands for?",
                    ["The cutest bear", "Panel Data",
                    "Pure Adamantium Numeric Datasets And Stuff", "PArties & DAtaSets"],
                    1)

# All arguments
st.header("Question with all optional arguments")
c1, c2 = st.columns([5,4])
with c1:
    st.code("""
stb.single_choice("What does pandas (python library) stands for?",
                ["The cutest bear", "Pure Adamantium Numeric Datasets And Stuff",
                "Panel Data", "PArties & DAtaSets"],
                2,
                success='Now you know!',
                error='Nopes, not this one...',
                button='Check MY answer'
                )
    """)
with c2:
    # FIX: the executed call previously shuffled the answer list and used
    # index 3, so the live widget did not match the snippet shown on the
    # left. Keep call and snippet identical.
    stb.single_choice("What does pandas (python library) stands for?",
                    ["The cutest bear", "Pure Adamantium Numeric Datasets And Stuff",
                    "Panel Data", "PArties & DAtaSets"],
                    2,
                    success='Now you know!',
                    error='Nopes, not this one...',
                    button='Check MY answer'
                    )

# Custom question
st.header("Question with custom behavior")
c1, c2 = st.columns([5,4])
with c1:
    st.code("""
checked_answer, correct_answer = stb.single_choice(
            "What does pandas (the python library) stands for?",
            ["The cutest bear",
            "Pure Adamantium Numeric Datasets And Stuff",
            "Panel Data",
            "PArties & DAtaSets"],
            2,
            success='',
            error='',
            button='Check THE answer'
            )
if checked_answer:
    if correct_answer:
        st.info("Yes! It's Panel Data, but here's a pandas as a prize just for you!")
        st.image('https://www.stockvault.net/data/2016/06/30/203684/preview16.jpg')
        st.balloons()
    else:
        st.warning("Sadly, that's not true")
else:
    st.write("You need to check the answer")
    """)
with c2:
    # single_choice returns (was the button pressed, was the answer right);
    # empty success/error strings suppress the built-in feedback so the
    # custom messages below are the only feedback shown.
    checked_answer, correct_answer = stb.single_choice(
            "What does pandas (the python library) stands for?",
            ["The cutest bear", "Pure Adamantium Numeric Datasets And Stuff",
            "Panel Data", "PArties & DAtaSets"],
            2,
            success='',
            error='',
            button='Check THE answer'
            )
    if checked_answer:
        if correct_answer:
            st.info("Yes! It's Panel Data, but here's a pandas as a prize just for you!")
            st.image('https://www.stockvault.net/data/2016/06/30/203684/preview16.jpg')
            st.balloons()
        else:
            st.warning("Sadly, that's not true")
    else:
        st.write("You need to check the answer")
| 37.873684
| 100
| 0.49333
| 383
| 3,598
| 4.5953
| 0.245431
| 0.040909
| 0.051136
| 0.064773
| 0.894886
| 0.861932
| 0.861932
| 0.861932
| 0.846591
| 0.846591
| 0
| 0.02601
| 0.401612
| 3,598
| 94
| 101
| 38.276596
| 0.791454
| 0.013341
| 0
| 0.788235
| 0
| 0.047059
| 0.668641
| 0.012972
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.047059
| 0
| 0.047059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
fd878c86649ba35ab5da2c4aa8e2208cdbf7c026
| 1,003
|
py
|
Python
|
utils/category-lists/test_populate_category_lists_yaml.py
|
thomhalla/cv_faq
|
7b13b01f35b3d5fa5c8b25b57e69933fbb4b4963
|
[
"CC0-1.0"
] | 14
|
2020-04-13T21:44:11.000Z
|
2020-09-29T00:23:11.000Z
|
utils/category-lists/test_populate_category_lists_yaml.py
|
thomhalla/cv_faq
|
7b13b01f35b3d5fa5c8b25b57e69933fbb4b4963
|
[
"CC0-1.0"
] | 523
|
2020-04-14T15:03:21.000Z
|
2021-12-08T01:45:38.000Z
|
utils/category-lists/test_populate_category_lists_yaml.py
|
thomhalla/cv_faq
|
7b13b01f35b3d5fa5c8b25b57e69933fbb4b4963
|
[
"CC0-1.0"
] | 7
|
2020-04-14T23:08:25.000Z
|
2021-01-19T22:36:08.000Z
|
from populate_category_lists_yaml import sort_questions
def test_sort_questions():
    """Promoted questions come first; within each group, entries are ordered by name."""
    def q(promoted, name):
        # Build a question record; title always mirrors the name in this fixture.
        return {'is_promoted': promoted, 'name': name, 'title': name}

    shuffled = [
        q(True, 'ddd-name'),
        q(False, 'aaa-name'),
        q(False, 'ccc-name'),
        q(False, 'ddd-name'),
        q(True, 'aaa-name'),
        q(False, 'bbb-name'),
    ]
    expected = [
        q(True, 'aaa-name'),
        q(True, 'ddd-name'),
        q(False, 'aaa-name'),
        q(False, 'bbb-name'),
        q(False, 'ccc-name'),
        q(False, 'ddd-name'),
    ]
    assert sort_questions(shuffled) == expected
| 50.15
| 72
| 0.556331
| 123
| 1,003
| 4.382114
| 0.170732
| 0.222635
| 0.285714
| 0.282004
| 0.83859
| 0.83859
| 0.83859
| 0.83859
| 0.83859
| 0.83859
| 0
| 0
| 0.19342
| 1,003
| 19
| 73
| 52.789474
| 0.666255
| 0
| 0
| 0.705882
| 0
| 0
| 0.430708
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.058824
| true
| 0
| 0.058824
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
e30ee655131897943b6d43f50433c7c971747be5
| 22,113
|
py
|
Python
|
tests/0_data_structures/test_0_0_graph.py
|
whikloj/lakesuperior
|
733ac54e9525dcb7c3161bc70f04415e81650c06
|
[
"Apache-2.0"
] | 35
|
2017-12-07T19:20:40.000Z
|
2021-07-31T04:35:03.000Z
|
tests/0_data_structures/test_0_0_graph.py
|
whikloj/lakesuperior
|
733ac54e9525dcb7c3161bc70f04415e81650c06
|
[
"Apache-2.0"
] | 61
|
2018-03-07T04:59:37.000Z
|
2020-01-08T00:52:25.000Z
|
tests/0_data_structures/test_0_0_graph.py
|
whikloj/lakesuperior
|
733ac54e9525dcb7c3161bc70f04415e81650c06
|
[
"Apache-2.0"
] | 7
|
2018-03-10T17:15:26.000Z
|
2019-09-11T01:16:08.000Z
|
import pdb
import pytest
from shutil import rmtree
from rdflib import Graph, Namespace, URIRef
from lakesuperior.model.rdf.graph import Graph
from lakesuperior.store.ldp_rs.lmdb_store import LmdbStore
@pytest.fixture(scope='class')
def store():
    """
    Class-scoped LMDB test store.

    Unlike the store used by higher-level tests, this one is not
    bootstrapped: it starts completely empty and lives only for the
    duration of one test class.
    """
    db_path = '/tmp/test_lmdbstore'
    # Wipe any leftovers from a previous run before opening a fresh store.
    rmtree(db_path, ignore_errors=True)
    lmdb_store = LmdbStore(db_path)
    yield lmdb_store
    # Teardown: close handles first, then delete the on-disk environment.
    lmdb_store.close()
    lmdb_store.destroy()
@pytest.fixture(scope='class')
def trp():
    # Canned triples shared by the whole suite. Tests refer to these by
    # positional index, so the order here is significant.
    return (
        (URIRef('urn:s:0'), URIRef('urn:p:0'), URIRef('urn:o:0')),
        # Exact same as [0].
        (URIRef('urn:s:0'), URIRef('urn:p:0'), URIRef('urn:o:0')),
        # NOTE: s and o are in reversed order.
        (URIRef('urn:o:0'), URIRef('urn:p:0'), URIRef('urn:s:0')),
        (URIRef('urn:s:0'), URIRef('urn:p:1'), URIRef('urn:o:0')),
        (URIRef('urn:s:0'), URIRef('urn:p:1'), URIRef('urn:o:1')),
        (URIRef('urn:s:1'), URIRef('urn:p:1'), URIRef('urn:o:1')),
        (URIRef('urn:s:1'), URIRef('urn:p:2'), URIRef('urn:o:2')),
    )
@pytest.mark.usefixtures('trp')
@pytest.mark.usefixtures('store')
class TestGraphInit:
    """
    Graph construction from different base data sets.
    """
    def test_empty(self, store):
        """
        An empty graph can be created and measured without a transaction.
        """
        graph = Graph(store)
        # len() works outside of any DB transaction.
        assert len(graph) == 0

    def test_init_triples(self, trp, store):
        """
        A graph initialized from a Python set contains its distinct triples.
        """
        with store.txn_ctx():
            graph = Graph(store, data=set(trp))
            # 7 input triples with one exact duplicate -> 6 distinct entries.
            assert len(graph) == 6
            for triple in trp:
                assert triple in graph
@pytest.mark.usefixtures('trp')
@pytest.mark.usefixtures('store')
class TestGraphLookup:
    """
    Triple lookup with every combination of bound and unbound terms.
    """
    def test_lookup_all_unbound(self, trp, store):
        """
        Lookup ? ? ? (all terms unbound) returns every distinct triple.
        """
        with store.txn_ctx():
            gr = Graph(store, data=set(trp))
            hits = gr.lookup((None, None, None))
            assert len(hits) == 6
            for i in (0, 2, 3, 4, 5, 6):
                assert trp[i] in hits

    def test_lookup_s(self, trp, store):
        """
        Lookup s ? ? (only the subject bound).
        """
        with store.txn_ctx():
            gr = Graph(store, data=set(trp))
            hits = gr.lookup((URIRef('urn:s:0'), None, None))
            assert len(hits) == 3
            for i in (0, 3, 4):
                assert trp[i] in hits
            for i in (2, 5, 6):
                assert trp[i] not in hits
            # A subject with no matches yields an empty result set.
            misses = gr.lookup((URIRef('urn:s:8'), None, None))
            assert len(misses) == 0

    def test_lookup_p(self, trp, store):
        """
        Lookup ? p ? (only the predicate bound).
        """
        with store.txn_ctx():
            gr = Graph(store, data=set(trp))
            hits = gr.lookup((None, URIRef('urn:p:0'), None))
            assert len(hits) == 2
            for i in (0, 2):
                assert trp[i] in hits
            for i in (3, 4, 5, 6):
                assert trp[i] not in hits
            # A predicate with no matches yields an empty result set.
            misses = gr.lookup((None, URIRef('urn:p:8'), None))
            assert len(misses) == 0

    def test_lookup_o(self, trp, store):
        """
        Lookup ? ? o (only the object bound).
        """
        with store.txn_ctx():
            gr = Graph(store, data=set(trp))
            hits = gr.lookup((None, None, URIRef('urn:o:1')))
            assert len(hits) == 2
            for i in (4, 5):
                assert trp[i] in hits
            for i in (0, 2, 3, 6):
                assert trp[i] not in hits
            # An object with no matches yields an empty result set.
            misses = gr.lookup((None, None, URIRef('urn:o:8')))
            assert len(misses) == 0

    def test_lookup_sp(self, trp, store):
        """
        Lookup s p ? (subject and predicate bound).
        """
        with store.txn_ctx():
            gr = Graph(store, data=set(trp))
            hits = gr.lookup((URIRef('urn:s:0'), URIRef('urn:p:1'), None))
            assert len(hits) == 2
            for i in (3, 4):
                assert trp[i] in hits
            for i in (0, 2, 5, 6):
                assert trp[i] not in hits
            # A non-matching s/p pair yields an empty result set.
            misses = gr.lookup((URIRef('urn:s:0'), URIRef('urn:p:2'), None))
            assert len(misses) == 0

    def test_lookup_so(self, trp, store):
        """
        Lookup s ? o (subject and object bound).
        """
        with store.txn_ctx():
            gr = Graph(store, data=set(trp))
            hits = gr.lookup((URIRef('urn:s:0'), None, URIRef('urn:o:0')))
            assert len(hits) == 2
            for i in (0, 3):
                assert trp[i] in hits
            for i in (2, 4, 5, 6):
                assert trp[i] not in hits
            # A non-matching s/o pair yields an empty result set.
            misses = gr.lookup((URIRef('urn:s:0'), None, URIRef('urn:o:2')))
            assert len(misses) == 0

    def test_lookup_po(self, trp, store):
        """
        Lookup ? p o (predicate and object bound).
        """
        with store.txn_ctx():
            gr = Graph(store, data=set(trp))
            hits = gr.lookup((None, URIRef('urn:p:1'), URIRef('urn:o:1')))
            assert len(hits) == 2
            for i in (4, 5):
                assert trp[i] in hits
            for i in (0, 2, 3, 6):
                assert trp[i] not in hits
            # A non-matching p/o pair yields an empty result set.
            misses = gr.lookup((None, URIRef('urn:p:1'), URIRef('urn:o:2')))
            assert len(misses) == 0

    def test_lookup_spo(self, trp, store):
        """
        Lookup s p o (all terms bound).
        """
        with store.txn_ctx():
            gr = Graph(store, data=set(trp))
            hits = gr.lookup(
                (URIRef('urn:s:1'), URIRef('urn:p:1'), URIRef('urn:o:1'))
            )
            assert len(hits) == 1
            assert trp[5] in hits
            for i in (0, 2, 3, 4, 6):
                assert trp[i] not in hits
            # A fully-bound pattern with no match yields an empty result set.
            misses = gr.lookup(
                (URIRef('urn:s:1'), URIRef('urn:p:1'), URIRef('urn:o:2'))
            )
            assert len(misses) == 0
@pytest.mark.usefixtures('trp')
@pytest.mark.usefixtures('store')
class TestGraphSlicing:
    """
    Placeholder for graph slicing tests (not yet implemented).
    """
    # TODO
    pass
@pytest.mark.usefixtures('trp')
@pytest.mark.usefixtures('store')
class TestGraphOps:
    """
    Set-like operations on anonymous graphs.
    """
    def test_len(self, trp, store):
        """
        Graph length counts distinct triples only.
        """
        with store.txn_ctx():
            graph = Graph(store)
            assert len(graph) == 0
            graph.add((trp[0],))
            assert len(graph) == 1
            # trp[1] is an exact duplicate of trp[0]: the count is unchanged.
            graph.add((trp[1],))
            assert len(graph) == 1
            graph.add((trp[2],))
            assert len(graph) == 2
            graph.add(trp)
            assert len(graph) == 6

    def test_dup(self, trp, store):
        """
        A triple equal to a stored one is reported as contained.
        """
        with store.txn_ctx():
            graph = Graph(store)
            graph.add((trp[0],))
            assert trp[1] in graph
            assert trp[2] not in graph

    def test_remove(self, trp, store):
        """
        Removing triples shrinks the graph accordingly.
        """
        with store.txn_ctx():
            graph = Graph(store)
            graph.add(trp)
            graph.remove(trp[0])
            assert len(graph) == 5
            assert trp[0] not in graph
            assert trp[1] not in graph
            # trp[1] duplicated trp[0], so removing it changes nothing.
            graph.remove(trp[1])
            assert len(graph) == 5
            # trp[2] is the reversed-order triple: a distinct entry.
            graph.remove(trp[2])
            assert len(graph) == 4
            graph.remove(trp[4])
            assert len(graph) == 3

    def test_union(self, trp, store):
        """
        Union of two graphs via the | operator.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:3]})
            right = Graph(store, data={*trp[2:6]})
            merged = left | right
            assert len(merged) == 5
            assert trp[0] in merged
            assert trp[4] in merged

    def test_ip_union(self, trp, store):
        """
        In-place union via the |= operator.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:3]})
            right = Graph(store, data={*trp[2:6]})
            left |= right
            assert len(left) == 5
            assert trp[0] in left
            assert trp[4] in left

    def test_addition(self, trp, store):
        """
        Graph addition via the + operator (same result as union).
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:3]})
            right = Graph(store, data={*trp[2:6]})
            merged = left + right
            assert len(merged) == 5
            assert trp[0] in merged
            assert trp[4] in merged

    def test_ip_addition(self, trp, store):
        """
        In-place graph addition via the += operator.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:3]})
            right = Graph(store, data={*trp[2:6]})
            left += right
            assert len(left) == 5
            assert trp[0] in left
            assert trp[4] in left

    def test_subtraction(self, trp, store):
        """
        Graph difference via the - operator; operand order matters.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:4]})
            right = Graph(store, data={*trp[2:6]})
            diff = left - right
            assert len(diff) == 1
            for i in (0, 1):
                assert trp[i] in diff
            for i in (2, 3, 4):
                assert trp[i] not in diff
            diff = right - left
            assert len(diff) == 2
            for i in (0, 1, 2, 3):
                assert trp[i] not in diff
            for i in (4, 5):
                assert trp[i] in diff

    def test_ip_subtraction(self, trp, store):
        """
        In-place graph difference via the -= operator.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:4]})
            right = Graph(store, data={*trp[2:6]})
            left -= right
            assert len(left) == 1
            for i in (0, 1):
                assert trp[i] in left
            for i in (2, 3, 4):
                assert trp[i] not in left

    def test_intersect(self, trp, store):
        """
        Graph intersection via the & operator.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:4]})
            right = Graph(store, data={*trp[2:6]})
            common = left & right
            assert len(common) == 2
            for i in (2, 3):
                assert trp[i] in common
            for i in (0, 5):
                assert trp[i] not in common

    def test_ip_intersect(self, trp, store):
        """
        In-place graph intersection via the &= operator.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:4]})
            right = Graph(store, data={*trp[2:6]})
            left &= right
            assert len(left) == 2
            for i in (2, 3):
                assert trp[i] in left
            for i in (0, 5):
                assert trp[i] not in left

    def test_xor(self, trp, store):
        """
        Symmetric difference via the ^ operator.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:4]})
            right = Graph(store, data={*trp[2:6]})
            sym = left ^ right
            assert len(sym) == 3
            for i in (2, 3):
                assert trp[i] not in sym
            for i in (0, 5):
                assert trp[i] in sym

    def test_ip_xor(self, trp, store):
        """
        In-place symmetric difference via the ^= operator.
        """
        with store.txn_ctx():
            left = Graph(store, data={*trp[:4]})
            right = Graph(store, data={*trp[2:6]})
            left ^= right
            assert len(left) == 3
            for i in (2, 3):
                assert trp[i] not in left
            for i in (0, 5):
                assert trp[i] in left
@pytest.mark.usefixtures('trp')
@pytest.mark.usefixtures('store')
class TestNamedGraphOps:
    """
    Set-like operations on graphs that carry a URI (named graphs).
    """
    def test_len(self, trp, store):
        """
        Length of a named graph counts distinct triples only.
        """
        # Creating and measuring an empty named graph needs no transaction.
        named_gr = Graph(store, uri='http://example.edu/imr01')
        assert len(named_gr) == 0
        with store.txn_ctx():
            named_gr.add((trp[0],))
            assert len(named_gr) == 1
            # trp[1] is an exact duplicate of trp[0]: count unchanged.
            named_gr.add((trp[1],))
            assert len(named_gr) == 1
            named_gr.add((trp[2],))
            assert len(named_gr) == 2
            named_gr.add(trp)
            assert len(named_gr) == 6

    def test_dup(self, trp, store):
        """
        Duplicate detection in a named graph.
        """
        named_gr = Graph(store, uri='http://example.edu/imr01')
        with store.txn_ctx():
            named_gr.add((trp[0],))
            assert trp[1] in named_gr
            assert trp[2] not in named_gr

    def test_remove(self, trp, store):
        """
        Adding and removing triples on a named graph.
        """
        with store.txn_ctx():
            named_gr = Graph(store, uri='http://example.edu/imr01', data={*trp})
            named_gr.remove(trp[0])
            assert len(named_gr) == 5
            assert trp[0] not in named_gr
            assert trp[1] not in named_gr
            # trp[1] duplicated trp[0], so removing it changes nothing.
            named_gr.remove(trp[1])
            assert len(named_gr) == 5
            # trp[2] is the reversed-order triple: a distinct entry.
            named_gr.remove(trp[2])
            assert len(named_gr) == 4
            named_gr.remove(trp[4])
            assert len(named_gr) == 3

    def test_union(self, trp, store):
        """
        Union of two named graphs: the result carries no name.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:3]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            merged = left | right
            assert len(merged) == 5
            assert trp[0] in merged
            assert trp[4] in merged
            assert merged.uri == None

    def test_ip_union(self, trp, store):
        """
        In-place union keeps the target graph's name.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:3]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            left |= right
            assert len(left) == 5
            assert trp[0] in left
            assert trp[4] in left
            assert left.uri == URIRef('http://example.edu/imr01')

    def test_addition(self, trp, store):
        """
        Addition of two named graphs: the result carries no name.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:3]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            merged = left + right
            assert len(merged) == 5
            assert trp[0] in merged
            assert trp[4] in merged
            assert merged.uri == None

    def test_ip_addition(self, trp, store):
        """
        In-place addition keeps the target graph's name.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:3]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            left += right
            assert len(left) == 5
            assert trp[0] in left
            assert trp[4] in left
            assert left.uri == URIRef('http://example.edu/imr01')

    def test_subtraction(self, trp, store):
        """
        Difference of two named graphs (order matters); result is unnamed.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:4]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            diff = left - right
            assert len(diff) == 1
            for i in (0, 1):
                assert trp[i] in diff
            for i in (2, 3, 4):
                assert trp[i] not in diff
            assert diff.uri == None
            diff = right - left
            assert len(diff) == 2
            for i in (0, 1, 2, 3):
                assert trp[i] not in diff
            for i in (4, 5):
                assert trp[i] in diff
            assert diff.uri == None

    def test_ip_subtraction(self, trp, store):
        """
        In-place difference keeps the target graph's name.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:4]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            left -= right
            assert len(left) == 1
            for i in (0, 1):
                assert trp[i] in left
            for i in (2, 3, 4):
                assert trp[i] not in left
            assert left.uri == URIRef('http://example.edu/imr01')

    def test_intersect(self, trp, store):
        """
        Intersection of two named graphs; result is unnamed.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:4]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            common = left & right
            assert len(common) == 2
            for i in (2, 3):
                assert trp[i] in common
            for i in (0, 5):
                assert trp[i] not in common
            assert common.uri == None

    def test_ip_intersect(self, trp, store):
        """
        In-place intersection keeps the target graph's name.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:4]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            left &= right
            assert len(left) == 2
            for i in (2, 3):
                assert trp[i] in left
            for i in (0, 5):
                assert trp[i] not in left
            assert left.uri == URIRef('http://example.edu/imr01')

    def test_xor(self, trp, store):
        """
        Symmetric difference of two named graphs; result is unnamed.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:4]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            sym = left ^ right
            assert len(sym) == 3
            for i in (2, 3):
                assert trp[i] not in sym
            for i in (0, 5):
                assert trp[i] in sym
            assert sym.uri == None

    def test_ip_xor(self, trp, store):
        """
        In-place symmetric difference keeps the target graph's name.
        """
        with store.txn_ctx():
            left = Graph(store, uri='http://example.edu/imr01', data={*trp[:4]})
            right = Graph(store, uri='http://example.edu/imr02', data={*trp[2:6]})
            left ^= right
            assert len(left) == 3
            for i in (2, 3):
                assert trp[i] not in left
            for i in (0, 5):
                assert trp[i] in left
            assert left.uri == URIRef('http://example.edu/imr01')
@pytest.mark.usefixtures('trp')
@pytest.mark.usefixtures('store')
class TestHybridOps:
    """
    Operations mixing a named graph (IMR) and an anonymous graph.
    """
    def test_hybrid_union(self, trp, store):
        """
        Union of named + anonymous graphs yields the same (unnamed) result
        regardless of operand order.
        """
        with store.txn_ctx():
            named = Graph(store, uri='http://example.edu/imr01', data={*trp[:3]})
            anon = Graph(store, data={*trp[2:6]})
            merged = named | anon
            assert len(merged) == 5
            assert trp[0] in merged
            assert trp[4] in merged
            assert isinstance(merged, Graph)
            assert merged.uri == None
            merged_rev = anon | named
            assert isinstance(merged_rev, Graph)
            assert merged == merged_rev

    def test_ip_union_imr(self, trp, store):
        """
        In-place union onto the named graph keeps its name.
        """
        with store.txn_ctx():
            named = Graph(store, uri='http://example.edu/imr01', data={*trp[:3]})
            anon = Graph(store, data={*trp[2:6]})
            named |= anon
            assert len(named) == 5
            assert trp[0] in named
            assert trp[4] in named
            assert named.uri == URIRef('http://example.edu/imr01')

    def test_ip_union_gr(self, trp, store):
        """
        In-place union onto the anonymous graph remains a plain Graph.
        """
        with store.txn_ctx():
            anon = Graph(store, data={*trp[:3]})
            named = Graph(store, uri='http://example.edu/imr01', data={*trp[2:6]})
            anon |= named
            assert len(anon) == 5
            assert trp[0] in anon
            assert trp[4] in anon
            assert isinstance(anon, Graph)
| 25.984724
| 82
| 0.492109
| 2,960
| 22,113
| 3.608446
| 0.056419
| 0.119652
| 0.031458
| 0.048685
| 0.875667
| 0.833255
| 0.815841
| 0.794308
| 0.779515
| 0.732797
| 0
| 0.046242
| 0.376068
| 22,113
| 850
| 83
| 26.015294
| 0.727912
| 0.088726
| 0
| 0.782051
| 0
| 0
| 0.060663
| 0
| 0
| 0
| 0
| 0.001176
| 0.470085
| 1
| 0.087607
| false
| 0.002137
| 0.012821
| 0.002137
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8b5c5cfd5aaddb623e883cf5529ed9e164b2f8dd
| 249
|
py
|
Python
|
takaggle/feature/__init__.py
|
takapy0210/takaggle
|
fcaa6ef23f3fd2a5a8ebe15e66b66c99d684d8d0
|
[
"MIT"
] | 3
|
2021-03-21T02:28:25.000Z
|
2022-02-12T07:28:56.000Z
|
takaggle/feature/__init__.py
|
takapy0210/takaggle
|
fcaa6ef23f3fd2a5a8ebe15e66b66c99d684d8d0
|
[
"MIT"
] | null | null | null |
takaggle/feature/__init__.py
|
takapy0210/takaggle
|
fcaa6ef23f3fd2a5a8ebe15e66b66c99d684d8d0
|
[
"MIT"
] | null | null | null |
from takaggle.feature.bert_sentence_vectorizer import *
from takaggle.feature.category_encoder import *
from takaggle.feature.feature_engineering import *
from takaggle.feature.feature_selection import *
from takaggle.feature.reduce_memory import *
| 41.5
| 55
| 0.859438
| 31
| 249
| 6.709677
| 0.419355
| 0.288462
| 0.456731
| 0.480769
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080321
| 249
| 5
| 56
| 49.8
| 0.908297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d07d5d52cecf936ba8f67f4cb8d9b2613dd19f20
| 39,245
|
py
|
Python
|
sdk/python/pulumi_snowflake/notification_integration.py
|
pulumi/pulumi-snowflake
|
c3e0c2c8f57fd7b986b9259be635de6f28ab2eea
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2021-07-01T17:03:33.000Z
|
2022-03-01T19:29:04.000Z
|
sdk/python/pulumi_snowflake/notification_integration.py
|
pulumi/pulumi-snowflake
|
c3e0c2c8f57fd7b986b9259be635de6f28ab2eea
|
[
"ECL-2.0",
"Apache-2.0"
] | 102
|
2021-07-14T13:12:58.000Z
|
2022-03-31T18:34:04.000Z
|
sdk/python/pulumi_snowflake/notification_integration.py
|
pulumi/pulumi-snowflake
|
c3e0c2c8f57fd7b986b9259be635de6f28ab2eea
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-25T07:24:45.000Z
|
2022-03-25T07:24:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['NotificationIntegrationArgs', 'NotificationIntegration']
@pulumi.input_type
class NotificationIntegrationArgs:
    """Input arguments for the NotificationIntegration resource (auto-generated by tfgen)."""
    def __init__(__self__, *,
                 aws_sns_role_arn: Optional[pulumi.Input[str]] = None,
                 aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
                 aws_sqs_arn: Optional[pulumi.Input[str]] = None,
                 aws_sqs_role_arn: Optional[pulumi.Input[str]] = None,
                 azure_storage_queue_primary_uri: Optional[pulumi.Input[str]] = None,
                 azure_tenant_id: Optional[pulumi.Input[str]] = None,
                 comment: Optional[pulumi.Input[str]] = None,
                 direction: Optional[pulumi.Input[str]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 gcp_pubsub_subscription_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 notification_provider: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a NotificationIntegration resource.
        :param pulumi.Input[str] aws_sns_role_arn: AWS IAM role ARN for notification integration to assume
        :param pulumi.Input[str] aws_sns_topic_arn: AWS SNS Topic ARN for notification integration to connect to
        :param pulumi.Input[str] aws_sqs_arn: AWS SQS queue ARN for notification integration to connect to
        :param pulumi.Input[str] aws_sqs_role_arn: AWS IAM role ARN for notification integration to assume
        :param pulumi.Input[str] azure_storage_queue_primary_uri: The queue ID for the Azure Queue Storage queue created for Event Grid notifications
        :param pulumi.Input[str] azure_tenant_id: The ID of the Azure Active Directory tenant used for identity management
        :param pulumi.Input[str] comment: A comment for the integration
        :param pulumi.Input[str] direction: Direction of the cloud messaging with respect to Snowflake (required only for error notifications)
        :param pulumi.Input[str] gcp_pubsub_subscription_name: The subscription id that Snowflake will listen to when using the GCP_PUBSUB provider.
        :param pulumi.Input[str] notification_provider: The third-party cloud message queuing service (e.g. AZURE*STORAGE*QUEUE, AWS*SQS, AWS*SNS)
        :param pulumi.Input[str] type: A type of integration
        """
        # Only explicitly supplied arguments are stored on the instance;
        # unset (None) arguments remain absent from the resource inputs.
        if aws_sns_role_arn is not None:
            pulumi.set(__self__, "aws_sns_role_arn", aws_sns_role_arn)
        if aws_sns_topic_arn is not None:
            pulumi.set(__self__, "aws_sns_topic_arn", aws_sns_topic_arn)
        if aws_sqs_arn is not None:
            pulumi.set(__self__, "aws_sqs_arn", aws_sqs_arn)
        if aws_sqs_role_arn is not None:
            pulumi.set(__self__, "aws_sqs_role_arn", aws_sqs_role_arn)
        if azure_storage_queue_primary_uri is not None:
            pulumi.set(__self__, "azure_storage_queue_primary_uri", azure_storage_queue_primary_uri)
        if azure_tenant_id is not None:
            pulumi.set(__self__, "azure_tenant_id", azure_tenant_id)
        if comment is not None:
            pulumi.set(__self__, "comment", comment)
        if direction is not None:
            pulumi.set(__self__, "direction", direction)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if gcp_pubsub_subscription_name is not None:
            pulumi.set(__self__, "gcp_pubsub_subscription_name", gcp_pubsub_subscription_name)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if notification_provider is not None:
            pulumi.set(__self__, "notification_provider", notification_provider)
        if type is not None:
            pulumi.set(__self__, "type", type)
    # Each property below is a thin proxy over pulumi.get/pulumi.set for the
    # snake_case argument, exposed to the engine under its camelCase name.
    @property
    @pulumi.getter(name="awsSnsRoleArn")
    def aws_sns_role_arn(self) -> Optional[pulumi.Input[str]]:
        """
        AWS IAM role ARN for notification integration to assume
        """
        return pulumi.get(self, "aws_sns_role_arn")
    @aws_sns_role_arn.setter
    def aws_sns_role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "aws_sns_role_arn", value)
    @property
    @pulumi.getter(name="awsSnsTopicArn")
    def aws_sns_topic_arn(self) -> Optional[pulumi.Input[str]]:
        """
        AWS SNS Topic ARN for notification integration to connect to
        """
        return pulumi.get(self, "aws_sns_topic_arn")
    @aws_sns_topic_arn.setter
    def aws_sns_topic_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "aws_sns_topic_arn", value)
    @property
    @pulumi.getter(name="awsSqsArn")
    def aws_sqs_arn(self) -> Optional[pulumi.Input[str]]:
        """
        AWS SQS queue ARN for notification integration to connect to
        """
        return pulumi.get(self, "aws_sqs_arn")
    @aws_sqs_arn.setter
    def aws_sqs_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "aws_sqs_arn", value)
    @property
    @pulumi.getter(name="awsSqsRoleArn")
    def aws_sqs_role_arn(self) -> Optional[pulumi.Input[str]]:
        """
        AWS IAM role ARN for notification integration to assume
        """
        return pulumi.get(self, "aws_sqs_role_arn")
    @aws_sqs_role_arn.setter
    def aws_sqs_role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "aws_sqs_role_arn", value)
    @property
    @pulumi.getter(name="azureStorageQueuePrimaryUri")
    def azure_storage_queue_primary_uri(self) -> Optional[pulumi.Input[str]]:
        """
        The queue ID for the Azure Queue Storage queue created for Event Grid notifications
        """
        return pulumi.get(self, "azure_storage_queue_primary_uri")
    @azure_storage_queue_primary_uri.setter
    def azure_storage_queue_primary_uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "azure_storage_queue_primary_uri", value)
    @property
    @pulumi.getter(name="azureTenantId")
    def azure_tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Azure Active Directory tenant used for identity management
        """
        return pulumi.get(self, "azure_tenant_id")
    @azure_tenant_id.setter
    def azure_tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "azure_tenant_id", value)
    @property
    @pulumi.getter
    def comment(self) -> Optional[pulumi.Input[str]]:
        """
        A comment for the integration
        """
        return pulumi.get(self, "comment")
    @comment.setter
    def comment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "comment", value)
    @property
    @pulumi.getter
    def direction(self) -> Optional[pulumi.Input[str]]:
        """
        Direction of the cloud messaging with respect to Snowflake (required only for error notifications)
        """
        return pulumi.get(self, "direction")
    @direction.setter
    def direction(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "direction", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        # No upstream description is generated for this flag.
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="gcpPubsubSubscriptionName")
    def gcp_pubsub_subscription_name(self) -> Optional[pulumi.Input[str]]:
        """
        The subscription id that Snowflake will listen to when using the GCP_PUBSUB provider.
        """
        return pulumi.get(self, "gcp_pubsub_subscription_name")
    @gcp_pubsub_subscription_name.setter
    def gcp_pubsub_subscription_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "gcp_pubsub_subscription_name", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        # No upstream description is generated for this field.
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="notificationProvider")
    def notification_provider(self) -> Optional[pulumi.Input[str]]:
        """
        The third-party cloud message queuing service (e.g. AZURE*STORAGE*QUEUE, AWS*SQS, AWS*SNS)
        """
        return pulumi.get(self, "notification_provider")
    @notification_provider.setter
    def notification_provider(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notification_provider", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        A type of integration
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class _NotificationIntegrationState:
def __init__(__self__, *,
aws_sns_external_id: Optional[pulumi.Input[str]] = None,
aws_sns_iam_user_arn: Optional[pulumi.Input[str]] = None,
aws_sns_role_arn: Optional[pulumi.Input[str]] = None,
aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_external_id: Optional[pulumi.Input[str]] = None,
aws_sqs_iam_user_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_role_arn: Optional[pulumi.Input[str]] = None,
azure_storage_queue_primary_uri: Optional[pulumi.Input[str]] = None,
azure_tenant_id: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
created_on: Optional[pulumi.Input[str]] = None,
direction: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
gcp_pubsub_subscription_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_provider: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering NotificationIntegration resources.
:param pulumi.Input[str] aws_sns_external_id: The external ID that Snowflake will use when assuming the AWS role
:param pulumi.Input[str] aws_sns_iam_user_arn: The Snowflake user that will attempt to assume the AWS role.
:param pulumi.Input[str] aws_sns_role_arn: AWS IAM role ARN for notification integration to assume
:param pulumi.Input[str] aws_sns_topic_arn: AWS SNS Topic ARN for notification integration to connect to
:param pulumi.Input[str] aws_sqs_arn: AWS SQS queue ARN for notification integration to connect to
:param pulumi.Input[str] aws_sqs_external_id: The external ID that Snowflake will use when assuming the AWS role
:param pulumi.Input[str] aws_sqs_iam_user_arn: The Snowflake user that will attempt to assume the AWS role.
:param pulumi.Input[str] aws_sqs_role_arn: AWS IAM role ARN for notification integration to assume
:param pulumi.Input[str] azure_storage_queue_primary_uri: The queue ID for the Azure Queue Storage queue created for Event Grid notifications
:param pulumi.Input[str] azure_tenant_id: The ID of the Azure Active Directory tenant used for identity management
:param pulumi.Input[str] comment: A comment for the integration
:param pulumi.Input[str] created_on: Date and time when the notification integration was created.
:param pulumi.Input[str] direction: Direction of the cloud messaging with respect to Snowflake (required only for error notifications)
:param pulumi.Input[str] gcp_pubsub_subscription_name: The subscription id that Snowflake will listen to when using the GCP_PUBSUB provider.
:param pulumi.Input[str] notification_provider: The third-party cloud message queuing service (e.g. AZURE*STORAGE*QUEUE, AWS*SQS, AWS*SNS)
:param pulumi.Input[str] type: A type of integration
"""
if aws_sns_external_id is not None:
pulumi.set(__self__, "aws_sns_external_id", aws_sns_external_id)
if aws_sns_iam_user_arn is not None:
pulumi.set(__self__, "aws_sns_iam_user_arn", aws_sns_iam_user_arn)
if aws_sns_role_arn is not None:
pulumi.set(__self__, "aws_sns_role_arn", aws_sns_role_arn)
if aws_sns_topic_arn is not None:
pulumi.set(__self__, "aws_sns_topic_arn", aws_sns_topic_arn)
if aws_sqs_arn is not None:
pulumi.set(__self__, "aws_sqs_arn", aws_sqs_arn)
if aws_sqs_external_id is not None:
pulumi.set(__self__, "aws_sqs_external_id", aws_sqs_external_id)
if aws_sqs_iam_user_arn is not None:
pulumi.set(__self__, "aws_sqs_iam_user_arn", aws_sqs_iam_user_arn)
if aws_sqs_role_arn is not None:
pulumi.set(__self__, "aws_sqs_role_arn", aws_sqs_role_arn)
if azure_storage_queue_primary_uri is not None:
pulumi.set(__self__, "azure_storage_queue_primary_uri", azure_storage_queue_primary_uri)
if azure_tenant_id is not None:
pulumi.set(__self__, "azure_tenant_id", azure_tenant_id)
if comment is not None:
pulumi.set(__self__, "comment", comment)
if created_on is not None:
pulumi.set(__self__, "created_on", created_on)
if direction is not None:
pulumi.set(__self__, "direction", direction)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if gcp_pubsub_subscription_name is not None:
pulumi.set(__self__, "gcp_pubsub_subscription_name", gcp_pubsub_subscription_name)
if name is not None:
pulumi.set(__self__, "name", name)
if notification_provider is not None:
pulumi.set(__self__, "notification_provider", notification_provider)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="awsSnsExternalId")
def aws_sns_external_id(self) -> Optional[pulumi.Input[str]]:
"""
The external ID that Snowflake will use when assuming the AWS role
"""
return pulumi.get(self, "aws_sns_external_id")
@aws_sns_external_id.setter
def aws_sns_external_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_sns_external_id", value)
@property
@pulumi.getter(name="awsSnsIamUserArn")
def aws_sns_iam_user_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Snowflake user that will attempt to assume the AWS role.
"""
return pulumi.get(self, "aws_sns_iam_user_arn")
@aws_sns_iam_user_arn.setter
def aws_sns_iam_user_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_sns_iam_user_arn", value)
@property
@pulumi.getter(name="awsSnsRoleArn")
def aws_sns_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
AWS IAM role ARN for notification integration to assume
"""
return pulumi.get(self, "aws_sns_role_arn")
@aws_sns_role_arn.setter
def aws_sns_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_sns_role_arn", value)
@property
@pulumi.getter(name="awsSnsTopicArn")
def aws_sns_topic_arn(self) -> Optional[pulumi.Input[str]]:
"""
AWS SNS Topic ARN for notification integration to connect to
"""
return pulumi.get(self, "aws_sns_topic_arn")
@aws_sns_topic_arn.setter
def aws_sns_topic_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_sns_topic_arn", value)
@property
@pulumi.getter(name="awsSqsArn")
def aws_sqs_arn(self) -> Optional[pulumi.Input[str]]:
"""
AWS SQS queue ARN for notification integration to connect to
"""
return pulumi.get(self, "aws_sqs_arn")
@aws_sqs_arn.setter
def aws_sqs_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_sqs_arn", value)
@property
@pulumi.getter(name="awsSqsExternalId")
def aws_sqs_external_id(self) -> Optional[pulumi.Input[str]]:
"""
The external ID that Snowflake will use when assuming the AWS role
"""
return pulumi.get(self, "aws_sqs_external_id")
@aws_sqs_external_id.setter
def aws_sqs_external_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_sqs_external_id", value)
@property
@pulumi.getter(name="awsSqsIamUserArn")
def aws_sqs_iam_user_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Snowflake user that will attempt to assume the AWS role.
"""
return pulumi.get(self, "aws_sqs_iam_user_arn")
@aws_sqs_iam_user_arn.setter
def aws_sqs_iam_user_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_sqs_iam_user_arn", value)
@property
@pulumi.getter(name="awsSqsRoleArn")
def aws_sqs_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
AWS IAM role ARN for notification integration to assume
"""
return pulumi.get(self, "aws_sqs_role_arn")
@aws_sqs_role_arn.setter
def aws_sqs_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "aws_sqs_role_arn", value)
@property
@pulumi.getter(name="azureStorageQueuePrimaryUri")
def azure_storage_queue_primary_uri(self) -> Optional[pulumi.Input[str]]:
"""
The queue ID for the Azure Queue Storage queue created for Event Grid notifications
"""
return pulumi.get(self, "azure_storage_queue_primary_uri")
@azure_storage_queue_primary_uri.setter
def azure_storage_queue_primary_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "azure_storage_queue_primary_uri", value)
@property
@pulumi.getter(name="azureTenantId")
def azure_tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the Azure Active Directory tenant used for identity management
"""
return pulumi.get(self, "azure_tenant_id")
@azure_tenant_id.setter
def azure_tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "azure_tenant_id", value)
@property
@pulumi.getter
def comment(self) -> Optional[pulumi.Input[str]]:
"""
A comment for the integration
"""
return pulumi.get(self, "comment")
@comment.setter
def comment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "comment", value)
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> Optional[pulumi.Input[str]]:
"""
Date and time when the notification integration was created.
"""
return pulumi.get(self, "created_on")
@created_on.setter
def created_on(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "created_on", value)
@property
@pulumi.getter
def direction(self) -> Optional[pulumi.Input[str]]:
"""
Direction of the cloud messaging with respect to Snowflake (required only for error notifications)
"""
return pulumi.get(self, "direction")
@direction.setter
def direction(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "direction", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter(name="gcpPubsubSubscriptionName")
def gcp_pubsub_subscription_name(self) -> Optional[pulumi.Input[str]]:
"""
The subscription id that Snowflake will listen to when using the GCP_PUBSUB provider.
"""
return pulumi.get(self, "gcp_pubsub_subscription_name")
@gcp_pubsub_subscription_name.setter
def gcp_pubsub_subscription_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gcp_pubsub_subscription_name", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notificationProvider")
def notification_provider(self) -> Optional[pulumi.Input[str]]:
"""
The third-party cloud message queuing service (e.g. AZURE*STORAGE*QUEUE, AWS*SQS, AWS*SNS)
"""
return pulumi.get(self, "notification_provider")
@notification_provider.setter
def notification_provider(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notification_provider", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
A type of integration
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
class NotificationIntegration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws_sns_role_arn: Optional[pulumi.Input[str]] = None,
aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_role_arn: Optional[pulumi.Input[str]] = None,
azure_storage_queue_primary_uri: Optional[pulumi.Input[str]] = None,
azure_tenant_id: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
direction: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
gcp_pubsub_subscription_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_provider: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
## Example Usage
```python
import pulumi
import pulumi_snowflake as snowflake
integration = snowflake.NotificationIntegration("integration",
aws_sns_role_arn="...",
aws_sns_topic_arn="...",
aws_sqs_arn="...",
aws_sqs_role_arn="...",
azure_storage_queue_primary_uri="...",
azure_tenant_id="...",
comment="A notification integration.",
direction="OUTBOUND",
enabled=True,
notification_provider="AWS_SNS",
type="QUEUE")
```
## Import
```sh
$ pulumi import snowflake:index/notificationIntegration:NotificationIntegration example name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] aws_sns_role_arn: AWS IAM role ARN for notification integration to assume
:param pulumi.Input[str] aws_sns_topic_arn: AWS SNS Topic ARN for notification integration to connect to
:param pulumi.Input[str] aws_sqs_arn: AWS SQS queue ARN for notification integration to connect to
:param pulumi.Input[str] aws_sqs_role_arn: AWS IAM role ARN for notification integration to assume
:param pulumi.Input[str] azure_storage_queue_primary_uri: The queue ID for the Azure Queue Storage queue created for Event Grid notifications
:param pulumi.Input[str] azure_tenant_id: The ID of the Azure Active Directory tenant used for identity management
:param pulumi.Input[str] comment: A comment for the integration
:param pulumi.Input[str] direction: Direction of the cloud messaging with respect to Snowflake (required only for error notifications)
:param pulumi.Input[str] gcp_pubsub_subscription_name: The subscription id that Snowflake will listen to when using the GCP_PUBSUB provider.
:param pulumi.Input[str] notification_provider: The third-party cloud message queuing service (e.g. AZURE*STORAGE*QUEUE, AWS*SQS, AWS*SNS)
:param pulumi.Input[str] type: A type of integration
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[NotificationIntegrationArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
## Example Usage
```python
import pulumi
import pulumi_snowflake as snowflake
integration = snowflake.NotificationIntegration("integration",
aws_sns_role_arn="...",
aws_sns_topic_arn="...",
aws_sqs_arn="...",
aws_sqs_role_arn="...",
azure_storage_queue_primary_uri="...",
azure_tenant_id="...",
comment="A notification integration.",
direction="OUTBOUND",
enabled=True,
notification_provider="AWS_SNS",
type="QUEUE")
```
## Import
```sh
$ pulumi import snowflake:index/notificationIntegration:NotificationIntegration example name
```
:param str resource_name: The name of the resource.
:param NotificationIntegrationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NotificationIntegrationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws_sns_role_arn: Optional[pulumi.Input[str]] = None,
aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_role_arn: Optional[pulumi.Input[str]] = None,
azure_storage_queue_primary_uri: Optional[pulumi.Input[str]] = None,
azure_tenant_id: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
direction: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
gcp_pubsub_subscription_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_provider: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NotificationIntegrationArgs.__new__(NotificationIntegrationArgs)
__props__.__dict__["aws_sns_role_arn"] = aws_sns_role_arn
__props__.__dict__["aws_sns_topic_arn"] = aws_sns_topic_arn
__props__.__dict__["aws_sqs_arn"] = aws_sqs_arn
__props__.__dict__["aws_sqs_role_arn"] = aws_sqs_role_arn
__props__.__dict__["azure_storage_queue_primary_uri"] = azure_storage_queue_primary_uri
__props__.__dict__["azure_tenant_id"] = azure_tenant_id
__props__.__dict__["comment"] = comment
__props__.__dict__["direction"] = direction
__props__.__dict__["enabled"] = enabled
__props__.__dict__["gcp_pubsub_subscription_name"] = gcp_pubsub_subscription_name
__props__.__dict__["name"] = name
__props__.__dict__["notification_provider"] = notification_provider
__props__.__dict__["type"] = type
__props__.__dict__["aws_sns_external_id"] = None
__props__.__dict__["aws_sns_iam_user_arn"] = None
__props__.__dict__["aws_sqs_external_id"] = None
__props__.__dict__["aws_sqs_iam_user_arn"] = None
__props__.__dict__["created_on"] = None
super(NotificationIntegration, __self__).__init__(
'snowflake:index/notificationIntegration:NotificationIntegration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
aws_sns_external_id: Optional[pulumi.Input[str]] = None,
aws_sns_iam_user_arn: Optional[pulumi.Input[str]] = None,
aws_sns_role_arn: Optional[pulumi.Input[str]] = None,
aws_sns_topic_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_external_id: Optional[pulumi.Input[str]] = None,
aws_sqs_iam_user_arn: Optional[pulumi.Input[str]] = None,
aws_sqs_role_arn: Optional[pulumi.Input[str]] = None,
azure_storage_queue_primary_uri: Optional[pulumi.Input[str]] = None,
azure_tenant_id: Optional[pulumi.Input[str]] = None,
comment: Optional[pulumi.Input[str]] = None,
created_on: Optional[pulumi.Input[str]] = None,
direction: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
gcp_pubsub_subscription_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_provider: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None) -> 'NotificationIntegration':
"""
Get an existing NotificationIntegration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] aws_sns_external_id: The external ID that Snowflake will use when assuming the AWS role
:param pulumi.Input[str] aws_sns_iam_user_arn: The Snowflake user that will attempt to assume the AWS role.
:param pulumi.Input[str] aws_sns_role_arn: AWS IAM role ARN for notification integration to assume
:param pulumi.Input[str] aws_sns_topic_arn: AWS SNS Topic ARN for notification integration to connect to
:param pulumi.Input[str] aws_sqs_arn: AWS SQS queue ARN for notification integration to connect to
:param pulumi.Input[str] aws_sqs_external_id: The external ID that Snowflake will use when assuming the AWS role
:param pulumi.Input[str] aws_sqs_iam_user_arn: The Snowflake user that will attempt to assume the AWS role.
:param pulumi.Input[str] aws_sqs_role_arn: AWS IAM role ARN for notification integration to assume
:param pulumi.Input[str] azure_storage_queue_primary_uri: The queue ID for the Azure Queue Storage queue created for Event Grid notifications
:param pulumi.Input[str] azure_tenant_id: The ID of the Azure Active Directory tenant used for identity management
:param pulumi.Input[str] comment: A comment for the integration
:param pulumi.Input[str] created_on: Date and time when the notification integration was created.
:param pulumi.Input[str] direction: Direction of the cloud messaging with respect to Snowflake (required only for error notifications)
:param pulumi.Input[str] gcp_pubsub_subscription_name: The subscription id that Snowflake will listen to when using the GCP_PUBSUB provider.
:param pulumi.Input[str] notification_provider: The third-party cloud message queuing service (e.g. AZURE*STORAGE*QUEUE, AWS*SQS, AWS*SNS)
:param pulumi.Input[str] type: A type of integration
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _NotificationIntegrationState.__new__(_NotificationIntegrationState)
__props__.__dict__["aws_sns_external_id"] = aws_sns_external_id
__props__.__dict__["aws_sns_iam_user_arn"] = aws_sns_iam_user_arn
__props__.__dict__["aws_sns_role_arn"] = aws_sns_role_arn
__props__.__dict__["aws_sns_topic_arn"] = aws_sns_topic_arn
__props__.__dict__["aws_sqs_arn"] = aws_sqs_arn
__props__.__dict__["aws_sqs_external_id"] = aws_sqs_external_id
__props__.__dict__["aws_sqs_iam_user_arn"] = aws_sqs_iam_user_arn
__props__.__dict__["aws_sqs_role_arn"] = aws_sqs_role_arn
__props__.__dict__["azure_storage_queue_primary_uri"] = azure_storage_queue_primary_uri
__props__.__dict__["azure_tenant_id"] = azure_tenant_id
__props__.__dict__["comment"] = comment
__props__.__dict__["created_on"] = created_on
__props__.__dict__["direction"] = direction
__props__.__dict__["enabled"] = enabled
__props__.__dict__["gcp_pubsub_subscription_name"] = gcp_pubsub_subscription_name
__props__.__dict__["name"] = name
__props__.__dict__["notification_provider"] = notification_provider
__props__.__dict__["type"] = type
return NotificationIntegration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="awsSnsExternalId")
def aws_sns_external_id(self) -> pulumi.Output[str]:
"""
The external ID that Snowflake will use when assuming the AWS role
"""
return pulumi.get(self, "aws_sns_external_id")
@property
@pulumi.getter(name="awsSnsIamUserArn")
def aws_sns_iam_user_arn(self) -> pulumi.Output[str]:
"""
The Snowflake user that will attempt to assume the AWS role.
"""
return pulumi.get(self, "aws_sns_iam_user_arn")
@property
@pulumi.getter(name="awsSnsRoleArn")
def aws_sns_role_arn(self) -> pulumi.Output[Optional[str]]:
"""
AWS IAM role ARN for notification integration to assume
"""
return pulumi.get(self, "aws_sns_role_arn")
@property
@pulumi.getter(name="awsSnsTopicArn")
def aws_sns_topic_arn(self) -> pulumi.Output[Optional[str]]:
"""
AWS SNS Topic ARN for notification integration to connect to
"""
return pulumi.get(self, "aws_sns_topic_arn")
@property
@pulumi.getter(name="awsSqsArn")
def aws_sqs_arn(self) -> pulumi.Output[Optional[str]]:
"""
AWS SQS queue ARN for notification integration to connect to
"""
return pulumi.get(self, "aws_sqs_arn")
@property
@pulumi.getter(name="awsSqsExternalId")
def aws_sqs_external_id(self) -> pulumi.Output[str]:
"""
The external ID that Snowflake will use when assuming the AWS role
"""
return pulumi.get(self, "aws_sqs_external_id")
@property
@pulumi.getter(name="awsSqsIamUserArn")
def aws_sqs_iam_user_arn(self) -> pulumi.Output[str]:
"""
The Snowflake user that will attempt to assume the AWS role.
"""
return pulumi.get(self, "aws_sqs_iam_user_arn")
@property
@pulumi.getter(name="awsSqsRoleArn")
def aws_sqs_role_arn(self) -> pulumi.Output[Optional[str]]:
"""
AWS IAM role ARN for notification integration to assume
"""
return pulumi.get(self, "aws_sqs_role_arn")
@property
@pulumi.getter(name="azureStorageQueuePrimaryUri")
def azure_storage_queue_primary_uri(self) -> pulumi.Output[Optional[str]]:
"""
The queue ID for the Azure Queue Storage queue created for Event Grid notifications
"""
return pulumi.get(self, "azure_storage_queue_primary_uri")
@property
@pulumi.getter(name="azureTenantId")
def azure_tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the Azure Active Directory tenant used for identity management
"""
return pulumi.get(self, "azure_tenant_id")
@property
@pulumi.getter
def comment(self) -> pulumi.Output[Optional[str]]:
"""
A comment for the integration
"""
return pulumi.get(self, "comment")
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> pulumi.Output[str]:
"""
Date and time when the notification integration was created.
"""
return pulumi.get(self, "created_on")
@property
@pulumi.getter
def direction(self) -> pulumi.Output[Optional[str]]:
"""
Direction of the cloud messaging with respect to Snowflake (required only for error notifications)
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="gcpPubsubSubscriptionName")
def gcp_pubsub_subscription_name(self) -> pulumi.Output[Optional[str]]:
"""
The subscription id that Snowflake will listen to when using the GCP_PUBSUB provider.
"""
return pulumi.get(self, "gcp_pubsub_subscription_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationProvider")
def notification_provider(self) -> pulumi.Output[Optional[str]]:
"""
The third-party cloud message queuing service (e.g. AZURE*STORAGE*QUEUE, AWS*SQS, AWS*SNS)
"""
return pulumi.get(self, "notification_provider")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
A type of integration
"""
return pulumi.get(self, "type")
| 44.902746
| 149
| 0.665461
| 4,899
| 39,245
| 5.022658
| 0.040008
| 0.087174
| 0.10469
| 0.114444
| 0.922783
| 0.912217
| 0.89279
| 0.88324
| 0.87629
| 0.861944
| 0
| 0.000033
| 0.236285
| 39,245
| 873
| 150
| 44.954181
| 0.820933
| 0.275755
| 0
| 0.811175
| 1
| 0
| 0.114869
| 0.038202
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16763
| false
| 0.001927
| 0.009634
| 0.011561
| 0.279383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d07df3a5c5ddb9a56f4593ddf31d43b08bcd379e
| 93
|
py
|
Python
|
up/tasks/det_3d/data/metrics/kitti_object_eval_python/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 196
|
2021-10-30T05:15:36.000Z
|
2022-03-30T18:43:40.000Z
|
up/tasks/det_3d/data/metrics/kitti_object_eval_python/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 12
|
2021-10-30T11:33:28.000Z
|
2022-03-31T14:22:58.000Z
|
up/tasks/det_3d/data/metrics/kitti_object_eval_python/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 23
|
2021-11-01T07:26:17.000Z
|
2022-03-27T05:55:37.000Z
|
from .eval import * # noqa
from .evaluate import * # noqa
from .kitti_common import * # noqa
| 23.25
| 34
| 0.709677
| 13
| 93
| 5
| 0.538462
| 0.461538
| 0.430769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 93
| 3
| 35
| 31
| 0.866667
| 0.150538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
efbe0f3cd5bc0ab2f52a5daea94b1b8eaf3a51c4
| 116
|
py
|
Python
|
classtime/brain/scheduling/__init__.py
|
rosshamish/classtime-implementation
|
16e72f0c066b75077dc05cbba290d459348e55c9
|
[
"MIT"
] | 1
|
2017-03-10T21:07:10.000Z
|
2017-03-10T21:07:10.000Z
|
classtime/brain/scheduling/__init__.py
|
rosshamish/classtime-implementation
|
16e72f0c066b75077dc05cbba290d459348e55c9
|
[
"MIT"
] | null | null | null |
classtime/brain/scheduling/__init__.py
|
rosshamish/classtime-implementation
|
16e72f0c066b75077dc05cbba290d459348e55c9
|
[
"MIT"
] | null | null | null |
from .schedule import Schedule
from .schedule import ScheduleScorer
from .schedule_generator import find_schedules
| 23.2
| 46
| 0.862069
| 14
| 116
| 7
| 0.5
| 0.367347
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112069
| 116
| 4
| 47
| 29
| 0.951456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
efdc3ec2bf3a9e64f01a8432410f91d134826d75
| 89,214
|
py
|
Python
|
romsSection.py
|
fossabot/Ekman
|
2b688ac156159b8499736f4663716252ae90bec9
|
[
"MIT"
] | null | null | null |
romsSection.py
|
fossabot/Ekman
|
2b688ac156159b8499736f4663716252ae90bec9
|
[
"MIT"
] | null | null | null |
romsSection.py
|
fossabot/Ekman
|
2b688ac156159b8499736f4663716252ae90bec9
|
[
"MIT"
] | null | null | null |
"""
Author: Ueslei Adriano Sutil
Created: 08 Apr 2020
Last modified: 06 Jan 2021
Version: 2.12
This file generates a new ROMS output file from scratch.
It is netCDF4 CF-compliant.
WARNING: Do not change anything in this file.
"""
from netCDF4 import Dataset
from setOptions import *
from matplotlib import path
from progress.bar import IncrementalBar
import numpy as np
import time
if romsSST or romsTemp or romsSalt or romsZeta or romsTKE or romsLatent or romsSensible or romsLWRad or romsSWRad or romsEvaporation or romsEminusP or romsUwind or romsVwind or romsW or romsOmega or romsRho == True:
romsMassPoints = True
else:
romsMassPoints = False
if romsU or romsV or romsUbar or romsVbar == True:
romsUVPoints = True
else:
romsUVPoints = False
romsFillVal = 1.e+37
def bbox2ij(lon,lat,romsBox=[-160., -155., 18., 23.]):
"""Return indices for i,j that will completely cover the specified bounding box.
i0,i1,j0,j1 = bbox2ij(lon,lat,romsBox)
lon,lat = 2D arrays that are the target of the subset
romsBox = list containing the bounding box: [lon_min, lon_max, lat_min, lat_max]
Example
-------
>>> i0,i1,j0,j1 = bbox2ij(lon_rho,[-71, -63., 39., 46])
>>> h_subset = nc.variables['h'][j0:j1,i0:i1]
"""
romsBox=np.array(romsBox)
mypath=np.array([romsBox[[0,1,1,0]],romsBox[[2,2,3,3]]]).T
p = path.Path(mypath)
points = np.vstack((lon.flatten(),lat.flatten())).T
n,m = np.shape(lon)
inside = p.contains_points(points).reshape((n,m))
ii,jj = np.meshgrid(range(m),range(n))
return min(ii[inside]),max(ii[inside]),min(jj[inside]),max(jj[inside])
def romsVars(romsOriDir,romsNewDir):
"""
Generates a new ROMS output file from scratch.
"""
# Original output file.
romsRawFile = Dataset(romsOriDir, mode='r')
romsNewFile = Dataset(romsNewDir, 'w', format='NETCDF4')
romsNewFile.title = "ROMS output file made by "+projectAuthor
romsNewFile.description = "Created with Ekman Toolbox in " + time.ctime(time.time())
romsNewFile.link = "https://github.com/uesleisutil/Ekman"
# If a variable on mass point has been chosen.
if romsMassPoints == True:
s_rho = romsRawFile.dimensions['s_rho']
s_w = romsRawFile.dimensions['s_w']
if selectRomsBox == True:
lon_rho = romsRawFile.variables['lon_rho'][:,:]
lat_rho = romsRawFile.variables['lat_rho'][:,:]
i0,i1,j0,j1 = bbox2ij(lon_rho,lat_rho,romsBox)
lon_rho = romsRawFile.variables['lon_rho'][j0:j1, i0:i1]
lat_rho = romsRawFile.variables['lat_rho'][j0:j1, i0:i1]
romsNewFile.createDimension('eta_rho', len(lon_rho[:,0]))
romsNewFile.createDimension('xi_rho', len(lon_rho[0,:]))
print("Bounding box selected. New domain limits are: Longitude "+str(romsBox[0])+"/"+str(romsBox[1])+" and Latitude "+str(romsBox[2])+"/"+str(romsBox[3])+".")
else:
print("No bounding box selected: Using XLAT and XLONG variables from input file.")
lon_rho = romsRawFile.variables['lon_rho'][:,:]
lat_rho = romsRawFile.variables['lat_rho'][:,:]
eta_rho = romsRawFile.dimensions['eta_rho']
xi_rho = romsRawFile.dimensions['xi_rho']
romsNewFile.createDimension('eta_rho', len(eta_rho))
romsNewFile.createDimension('xi_rho', len(xi_rho))
if selectRomsLevel == True:
romsNewFile.createDimension('s_rho', len(romsLevel))
else:
romsNewFile.createDimension('s_rho', len(s_rho))
romsNewFile.createDimension('s_w', len(s_w))
romsNewLon = romsNewFile.createVariable('lon_rho', 'd', ('eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewLon.long_name = 'Longitude on RHO-points'
romsNewLon.units = 'degree_east'
romsNewLon.standard_name = 'longitude'
romsNewLon[:,:] = lon_rho
romsNewLat = romsNewFile.createVariable('lat_rho', 'd', ('eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewLat.long_name = 'Latitude on RHO-points'
romsNewLat.units = 'degree_north'
romsNewLat.standard_name = 'latitude'
romsNewLat[:, :] = lat_rho
# Define vertical levels and time-steps.
levels = len(romsRawFile.variables['s_rho'][:])
if selectRomsLevel == True and len(romsLevel) == 1:
print("One vertical level selected: Working on vertical level "+str(romsLevel)+".")
if selectRomsLevel == True and len(romsLevel) > 1:
print("Multiple vertical levels selected: Working from level "+str(romsLevel[0])+" to "+str(romsLevel[-1])+".")
if selectRomsLevel == False:
print("No selected vertical levels specified: Using entire vertical level from input file.")
if selectRomsTimeStep == True:
ntimes = romsTimeStep
romsNewFile.createDimension('ocean_time', 0)
print("Time-step selected: Working from time-step "+str(ntimes[0])+" to "+str(ntimes[-1])+".")
else:
ntimes = romsRawFile.variables['ocean_time'][:]
ntimes = np.arange(np.argmin(ntimes), len(ntimes))
romsNewFile.createDimension('ocean_time', 0)
print("No time-step selected. Working with entire time-step.")
# If ROMS Sea Surface Temperature has been chosen.
if romsSST == True:
print('Working on ROMS Sea Surface Temperature.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,-1,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('sst', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Sea Surface Temperature'
romsNewVar.units = 'Degree Celsius'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,-1,j0:j1,i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,-1,:,:]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('sst', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Sea Surface Temperature'
romsNewVar.units = 'Degree Celsius'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,-1,:,:]
romsNewVar[i,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Potential Temperature has been chosen.
if romsTemp == True:
print('Working on ROMS Potential Temperature.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('temp', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Potential Temperature'
romsNewVar.units = 'Degree Celsius'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['temp'][i,romsLevel,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('temp', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Potential Temperature'
romsNewVar.units = 'Degree Celsius'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar[i,:,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,:,:,:]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('temp', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Potential Temperature'
romsNewVar.units = 'Degree Celsius'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,:,:,:]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == True and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('temp', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Potential Temperature'
romsNewVar.units = 'Degree Celsius'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,romsLevel,:, :]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('temp', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Potential Temperature'
romsNewVar.units = 'Degree Celsius'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,romsLevel,:, :]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('temp', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Potential Temperature'
romsNewVar.units = 'Degree Celsius'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['temp'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar[i,:,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Salinity has been chosen.
if romsSalt == True:
print('Working on ROMS Salinity.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('salt', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Salinity'
romsNewVar.units = 'PSU'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('salt', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Salinity'
romsNewVar.units = 'PSU'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar[i,:,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,:,:,:]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('salt', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Salinity'
romsNewVar.units = 'PSU'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,:,:,:]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == True and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('salt', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Salinity'
romsNewVar.units = 'PSU'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,romsLevel,:, :]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('salt', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Salinity'
romsNewVar.units = 'PSU'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,romsLevel,:, :]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('salt', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Salinity'
romsNewVar.units = 'PSU'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['salt'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar[i,:,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Turbulent Kinectic Energy has been chosen.
if romsTKE == True:
print('Working on ROMS Turbulent Kinectic Energy.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('tke', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Turbulent Kinectic Energy'
romsNewVar.units = 'm2 s-2'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('tke', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Turbulent Kinectic Energy'
romsNewVar.units = 'm2 s-2'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar[i,:,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,:,:,:]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('tke', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Turbulent Kinectic Energy'
romsNewVar.units = 'm2 s-2'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,:,:,:]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == True and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('tke', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Turbulent Kinectic Energy'
romsNewVar.units = 'm2 s-2'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,romsLevel,:, :]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('tke', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Turbulent Kinectic Energy'
romsNewVar.units = 'm2 s-2'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,romsLevel,:, :]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('tke', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Turbulent Kinectic Energy'
romsNewVar.units = 'm2 s-2'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['tke'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar[i,:,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Density Anomaly has been chosen.
if romsRho == True:
print('Working on ROMS Density Anomaly.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('rho', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Density Anomaly'
romsNewVar.units = 'kilogram meter-3'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('rho', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Density Anomaly'
romsNewVar.units = 'kilogram meter-3'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar[i,:,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,:,:,:]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('rho', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Density Anomaly'
romsNewVar.units = 'kilogram meter-3'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,:,:,:]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == True and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('rho', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Density Anomaly'
romsNewVar.units = 'kilogram meter-3'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,romsLevel,:, :]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('rho', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Density Anomaly'
romsNewVar.units = 'kilogram meter-3'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,romsLevel,:, :]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('rho', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Density Anomaly'
romsNewVar.units = 'kilogram meter-3'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['rho'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar[i,:,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Vertical Momentum Component has been chosen.
if romsW == True:
print('Working on ROMS Vertical Momentum Component.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('w', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('w', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar[i,:,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,:,:,:]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('w', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,:,:,:]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == True and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('w', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,romsLevel,:, :]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('w', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,romsLevel,:, :]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['w'][i,romsStart:romsStop,:, :]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('w', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['w'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar[i,:,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS S-coordinate Vertical Momentum Component has been chosen.
if romsOmega == True:
print('Working on ROMS S-coordinate Vertical Momentum Component.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('omega', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'S-coordinate Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,romsLevel,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('omega', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'S-coordinate Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['omega'][i,romsStart:romsStop,j0:j1, i0:i1]
romsNewVar[i,:,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,:,:,:]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('omega', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'S-coordinate Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,:,:,:]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == True and selectRomsLevel == False:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),levels,len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('omega', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'S-coordinate Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,:,j0:j1, i0:i1]
romsNewVar[i,:,:] = romsRawVar
elif selectRomsBox == False and selectRomsLevel == True:
if len(romsLevel) == 1:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,romsLevel,:, :]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('omega', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'S-coordinate Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,romsLevel,:, :]
romsNewVar[i,:,:] = romsRawVar
else:
romsStart = slice(min(romsLevel),max(romsLevel)+1).start
romsStop = slice(min(romsLevel),max(romsLevel)+1).stop
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar = np.zeros([len(ntimes),len(romsLevel),len(lat_rho[:,0]), len(lon_rho[0,:])])
romsNewVar = romsNewFile.createVariable('omega', 'f', ('ocean_time', 's_rho', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'S-coordinate Vertical Momentum Component'
romsNewVar.units = 'm s-1'
romsNewVar[i,:,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['omega'][ntimes[0]+i,romsStart:romsStop,:, :]
romsNewVar[i,:,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Free-surface has been chosen.
if romsZeta == True:
print('Working on ROMS Free-surface.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['zeta'][ntimes[0]+i,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('zeta', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Free-surface'
romsNewVar.units = 'meters'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['zeta'][ntimes[0]+i,j0:j1,i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['zeta'][ntimes[0]+i,:,:]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('zeta', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Free-surface'
romsNewVar.units = 'meters'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['zeta'][ntimes[0]+i,:,:]
romsNewVar[i,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Latent Heat Flux has been chosen.
if romsLatent == True:
print('Working on ROMS Latent Heat Flux.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['latent'][ntimes[0]+i,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('latent', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Latent Heat Flux'
romsNewVar.units = 'W m-2'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['latent'][ntimes[0]+i,j0:j1,i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['latent'][ntimes[0]+i,:,:]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('latent', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Latent Heat Flux'
romsNewVar.units = 'W m-2'
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['latent'][ntimes[0]+i,:,:]
romsNewVar[i,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Sensible Heat Flux has been chosen.
if romsSensible == True:
print('Working on ROMS Sensible Heat Flux.')
bar = IncrementalBar(max=len(ntimes))
for i in range(np.argmin(ntimes),len(ntimes),1):
if selectRomsBox == True:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['sensible'][ntimes[0]+i,j0:j1, i0:i1]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('sensible', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Sensible Heat Flux'
romsNewVar.units = 'W m-2'
romsNewVar.negative_value = "Upward flux = Cooling"
romsNewVar.positive_value = "Fownward flux = Heating"
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['sensible'][ntimes[0]+i,j0:j1,i0:i1]
romsNewVar[i,:,:] = romsRawVar
else:
if i == np.argmin(ntimes):
romsRawVar = romsRawFile.variables['sensible'][ntimes[0]+i,:,:]
romsNewVar = np.zeros([len(ntimes),len(lat_rho), len(lon_rho)])
romsNewVar = romsNewFile.createVariable('sensible', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
romsNewVar.long_name = 'Sensible Heat Flux'
romsNewVar.units = 'W m-2'
romsNewVar.negative_value = "Upward flux = Cooling"
romsNewVar.positive_value = "Downward flux = Heating"
romsNewVar[i,:,:] = romsRawVar
else:
romsRawVar = romsRawFile.variables['sensible'][ntimes[0]+i,:,:]
romsNewVar[i,:,:] = romsRawVar
bar.next()
bar.finish()
# If ROMS Net Longwave Radiation Flux has been chosen.
if romsLWRad:
    print('Working on ROMS Net Longwave Radiation Flux.')
    bar = IncrementalBar(max=len(ntimes))
    # First iteration creates the output NetCDF variable; later iterations
    # only copy the corresponding time slice into it.
    # NOTE(review): 'ntimes[0]+i' assumes ntimes holds integer time-step
    # indices (the selectRomsTimeStep path) -- confirm for the ocean_time
    # branch, where ntimes holds raw time values.
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        if selectRomsBox:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['lwrad'][ntimes[0]+i, j0:j1, i0:i1]
                # FIX: dropped a dead 'romsNewVar = np.zeros(...)' that was
                # immediately rebound by createVariable below.
                romsNewVar = romsNewFile.createVariable('lwrad', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Net Longwave Radiation Flux'
                romsNewVar.units = 'W m-2'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['lwrad'][ntimes[0]+i, j0:j1, i0:i1]
                romsNewVar[i,:,:] = romsRawVar
        else:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['lwrad'][ntimes[0]+i, :, :]
                romsNewVar = romsNewFile.createVariable('lwrad', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Net Longwave Radiation Flux'
                romsNewVar.units = 'W m-2'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['lwrad'][ntimes[0]+i, :, :]
                romsNewVar[i,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS Net Shortwave Radiation Flux has been chosen.
if romsSWRad:
    print('Working on ROMS Net Shortwave Radiation Flux.')
    bar = IncrementalBar(max=len(ntimes))
    # First iteration creates the output variable; later ones only append
    # the current time slice.
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        if selectRomsBox:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['swrad'][ntimes[0]+i, j0:j1, i0:i1]
                # FIX: removed dead 'np.zeros' allocation that was
                # immediately overwritten by createVariable.
                romsNewVar = romsNewFile.createVariable('swrad', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Net Shortwave Radiation Flux'
                romsNewVar.units = 'W m-2'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['swrad'][ntimes[0]+i, j0:j1, i0:i1]
                romsNewVar[i,:,:] = romsRawVar
        else:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['swrad'][ntimes[0]+i, :, :]
                romsNewVar = romsNewFile.createVariable('swrad', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Net Shortwave Radiation Flux'
                romsNewVar.units = 'W m-2'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['swrad'][ntimes[0]+i, :, :]
                romsNewVar[i,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS Bulk Flux Surface Net Freshwater Flux has been chosen.
if romsEminusP:
    print('Working on ROMS Bulk Flux Surface Net Freshwater Flux.')
    bar = IncrementalBar(max=len(ntimes))
    # First iteration creates the output variable with its sign-convention
    # attributes; later iterations only copy the time slice.
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        if selectRomsBox:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['EminusP'][ntimes[0]+i, j0:j1, i0:i1]
                # FIX: removed dead 'np.zeros' allocation (immediately
                # overwritten by createVariable).
                romsNewVar = romsNewFile.createVariable('EminusP', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Bulk Flux Surface Net Freshwater Flux'
                romsNewVar.units = 'meter s-1'
                romsNewVar.negative_value = "Upward = Freshening (Net Precipitation)"
                romsNewVar.positive_value = "Downward = Salting (Net Evaporation)"
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['EminusP'][ntimes[0]+i, j0:j1, i0:i1]
                romsNewVar[i,:,:] = romsRawVar
        else:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['EminusP'][ntimes[0]+i, :, :]
                romsNewVar = romsNewFile.createVariable('EminusP', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Bulk Flux Surface Net Freshwater Flux'
                romsNewVar.units = 'meter s-1'
                romsNewVar.negative_value = "Upward = Freshening (Net Precipitation)"
                romsNewVar.positive_value = "Downward = Salting (Net Evaporation)"
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['EminusP'][ntimes[0]+i, :, :]
                romsNewVar[i,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS Evaporation Rate has been chosen.
if romsEvaporation:
    print('Working on ROMS Evaporation Rate.')
    bar = IncrementalBar(max=len(ntimes))
    # Same copy pattern as the other surface-flux sections: create the
    # variable on the first time-step, then only write slices.
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        if selectRomsBox:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['evaporation'][ntimes[0]+i, j0:j1, i0:i1]
                # FIX: removed dead 'np.zeros' allocation (immediately
                # overwritten by createVariable).
                romsNewVar = romsNewFile.createVariable('evaporation', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Evaporation Rate'
                romsNewVar.units = 'Kg m-2 s-1'
                romsNewVar.negative_value = "Downward = Freshening (Condensation)"
                romsNewVar.positive_value = "Upward = Salting (Evaporation)"
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['evaporation'][ntimes[0]+i, j0:j1, i0:i1]
                romsNewVar[i,:,:] = romsRawVar
        else:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['evaporation'][ntimes[0]+i, :, :]
                romsNewVar = romsNewFile.createVariable('evaporation', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Evaporation Rate'
                romsNewVar.units = 'Kg m-2 s-1'
                romsNewVar.negative_value = "Downward = Freshening (Condensation)"
                romsNewVar.positive_value = "Upward = Salting (Evaporation)"
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['evaporation'][ntimes[0]+i, :, :]
                romsNewVar[i,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS U-wind Component has been chosen.
if romsUwind:
    print('Working on ROMS U-wind Component.')
    bar = IncrementalBar(max=len(ntimes))
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        if selectRomsBox:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['Uwind'][ntimes[0]+i, j0:j1, i0:i1]
                # FIX: removed dead 'np.zeros' allocation (immediately
                # overwritten by createVariable).
                romsNewVar = romsNewFile.createVariable('Uwind', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Surface U-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['Uwind'][ntimes[0]+i, j0:j1, i0:i1]
                romsNewVar[i,:,:] = romsRawVar
        else:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['Uwind'][ntimes[0]+i, :, :]
                romsNewVar = romsNewFile.createVariable('Uwind', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Surface U-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['Uwind'][ntimes[0]+i, :, :]
                romsNewVar[i,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS V-wind Component has been chosen.
if romsVwind:
    print('Working on ROMS V-wind Component.')
    bar = IncrementalBar(max=len(ntimes))
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        if selectRomsBox:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['Vwind'][ntimes[0]+i, j0:j1, i0:i1]
                # FIX: removed dead 'np.zeros' allocation (immediately
                # overwritten by createVariable).
                romsNewVar = romsNewFile.createVariable('Vwind', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Surface V-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['Vwind'][ntimes[0]+i, j0:j1, i0:i1]
                romsNewVar[i,:,:] = romsRawVar
        else:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['Vwind'][ntimes[0]+i, :, :]
                romsNewVar = romsNewFile.createVariable('Vwind', 'f', ('ocean_time', 'eta_rho', 'xi_rho'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Surface V-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['Vwind'][ntimes[0]+i, :, :]
                romsNewVar[i,:,:] = romsRawVar
        bar.next()
    bar.finish()
elif romsUVPoints == True:
if selectRomsBox == True:
if romsU or romsUbar == True:
lon_u = romsRawFile.variables['lon_u'][:, :]
lat_u = romsRawFile.variables['lat_u'][:, :]
i0_u,i1_u,j0_u,j1_u = bbox2ij(lon_u,lat_u,romsBox)
lon_u = romsRawFile.variables['lon_u'][j0_u:j1_u, i0_u:i1_u]
lat_u = romsRawFile.variables['lat_u'][j0_u:j1_u, i0_u:i1_u]
romsNewFile.createDimension('eta_u', len(lon_u[:,0]))
romsNewFile.createDimension('xi_u', len(lon_u[0,:]))
if romsV or romsVbar == True:
lon_v = romsRawFile.variables['lon_v'][:, :]
lat_v = romsRawFile.variables['lat_v'][:, :]
i0_v,i1_v,j0_v,j1_v = bbox2ij(lon_v,lat_v,romsBox)
lon_v = romsRawFile.variables['lon_v'][j0_v:j1_v, i0_v:i1_v]
lat_v = romsRawFile.variables['lat_v'][j0_v:j1_v, i0_v:i1_v]
romsNewFile.createDimension('eta_v', len(lon_v[:,0]))
romsNewFile.createDimension('xi_v', len(lon_v[0,:]))
print("Bounding box selected. New domain limits are: Longitude "+str(romsBox[0])+"/"+str(romsBox[1])+" and Latitude "+str(romsBox[2])+"/"+str(romsBox[3])+".")
else:
print("No bounding box selected: Using XLAT and XLONG variables from input file.")
if romsU or romsUbar == True:
eta_u = romsRawFile.dimensions['eta_u']
xi_u = romsRawFile.dimensions['xi_u']
lon_u = romsRawFile.variables['lon_u'][:,:]
lat_u = romsRawFile.variables['lat_u'][:,:]
romsNewFile.createDimension('eta_u', len(eta_u))
romsNewFile.createDimension('xi_u', len(xi_u))
if romsV or romsVbar == True:
eta_v = romsRawFile.dimensions['eta_v']
xi_v = romsRawFile.dimensions['xi_v']
lon_v = romsRawFile.variables['lon_v'][:,:]
lat_v = romsRawFile.variables['lat_v'][:,:]
romsNewFile.createDimension('eta_v', len(eta_v))
romsNewFile.createDimension('xi_v', len(xi_v))
if selectRomsLevel == True:
romsNewFile.createDimension('s_rho', len(romsLevel))
else:
s_rho = romsRawFile.dimensions['s_rho']
romsNewFile.createDimension('s_rho', len(s_rho))
# Define vertical levels and time-steps.
levels = len(romsRawFile.variables['s_rho'][:])
if selectRomsTimeStep == True:
ntimes = romsTimeStep
print("Time-step selected: Working from time-step "+str(np.argmin(ntimes))+" to "+str(np.argmax(ntimes))+".")
else:
ntimes = romsRawFile.variables['ocean_time'][:]
print("No time-step selected. Working with entire time-step.")
if selectRomsLevel and len(romsLevel) == 1 and romsU or romsV == True:
print("One vertical level selected: Working on level "+str(romsLevel)+".")
if selectRomsLevel and len(romsLevel) > 1 and romsU or romsV == True:
print("Multiple vertical levels selected: Working from level "+str(romsLevel[0])+" to "+str(romsLevel[-1])+".")
if selectRomsLevel == False and romsU or romsV == True:
print("No selected vertical levels specified: Using entire vertical level from input file.")
s_w = romsRawFile.dimensions['s_w']
romsNewFile.createDimension('s_w', len(s_w))
# Create lat and lon variables.
if romsU or romsUbar == True:
romsNewLonU = romsNewFile.createVariable('lon_u', 'd', ('eta_u', 'xi_u'), fill_value=romsFillVal)
romsNewLonU.long_name = 'Longitude on U-points'
romsNewLonU.units = 'degree_east'
romsNewLonU.standard_name = 'longitude'
romsNewLonU[:, :] = lon_u
romsNewLatU = romsNewFile.createVariable('lat_u', 'd', ('eta_u', 'xi_u'), fill_value=romsFillVal)
romsNewLatU.long_name = 'Latitude on U-points'
romsNewLatU.units = 'degree_north'
romsNewLatU.standard_name = 'latitude'
romsNewLatU[:, :] = lat_u
if romsV or romsVbar == True:
romsNewLonV = romsNewFile.createVariable('lon_v', 'd', ('eta_v', 'xi_v'), fill_value=romsFillVal)
romsNewLonV.long_name = 'Longitude on V-points'
romsNewLonV.units = 'degree_east'
romsNewLonV.standard_name = 'longitude'
romsNewLonV[:, :] = lon_v
romsNewLatV = romsNewFile.createVariable('lat_v', 'd', ('eta_v', 'xi_v'), fill_value=romsFillVal)
romsNewLatV.long_name = 'Latitude on U-points'
romsNewLatV.units = 'degree_north'
romsNewLatV.standard_name = 'latitude'
romsNewLatV[:, :] = lat_v
# If ROMS V-component (variable 'v') has been chosen.
# NOTE(review): 'v' on s_rho levels looks like the v-velocity (current), not
# wind; the original "V-wind Component" labels are kept pending confirmation.
if romsV:
    print('Working on ROMS V-wind Component.')
    bar = IncrementalBar(max=len(ntimes))
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        # Four cases: box/no-box crossed with level-subset/all-levels.
        # Dead 'np.zeros' pre-allocations removed throughout (they were
        # immediately overwritten by createVariable).
        if selectRomsBox and selectRomsLevel:
            if len(romsLevel) == 1:
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsLevel, j0_v:j1_v, i0_v:i1_v]
                    romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'V-wind Component'
                    # FIX: units were 'm s'; velocity is 'm s-1' (matches
                    # the ubar/vbar blocks).
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsLevel, j0_v:j1_v, i0_v:i1_v]
                    romsNewVar[i,:,:] = romsRawVar
            else:
                # Equivalent to the original slice(...).start/.stop dance.
                romsStart = min(romsLevel)
                romsStop = max(romsLevel) + 1
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsStart:romsStop, j0_v:j1_v, i0_v:i1_v]
                    romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'V-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsStart:romsStop, j0_v:j1_v, i0_v:i1_v]
                    romsNewVar[i,:,:,:] = romsRawVar
        elif not selectRomsBox and not selectRomsLevel:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, :, :, :]
                romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                romsNewVar.long_name = 'V-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, :, :, :]
                romsNewVar[i,:,:] = romsRawVar
        elif selectRomsBox and not selectRomsLevel:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, :, j0_v:j1_v, i0_v:i1_v]
                romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                romsNewVar.long_name = 'V-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, :, j0_v:j1_v, i0_v:i1_v]
                romsNewVar[i,:,:] = romsRawVar
        elif not selectRomsBox and selectRomsLevel:
            if len(romsLevel) == 1:
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsLevel, :, :]
                    romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'V-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsLevel, :, :]
                    romsNewVar[i,:,:] = romsRawVar
            else:
                romsStart = min(romsLevel)
                romsStop = max(romsLevel) + 1
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsStart:romsStop, :, :]
                    romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'V-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsStart:romsStop, :, :]
                    romsNewVar[i,:,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS U-component (variable 'u') has been chosen.
# FIX: the header comment, print message and long_name all said
# 'V-wind Component' (copy-paste from the v block); this block reads and
# writes variable 'u' on the u-grid.
if romsU:
    print('Working on ROMS U-wind Component.')
    bar = IncrementalBar(max=len(ntimes))
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        # Dead 'np.zeros' pre-allocations removed throughout; units fixed
        # from 'm s' to 'm s-1'.
        if selectRomsBox and selectRomsLevel:
            if len(romsLevel) == 1:
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, romsLevel, j0_u:j1_u, i0_u:i1_u]
                    romsNewVar = romsNewFile.createVariable('u', 'f', ('ocean_time', 'eta_u', 'xi_u'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'U-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, romsLevel, j0_u:j1_u, i0_u:i1_u]
                    romsNewVar[i,:,:] = romsRawVar
            else:
                # Equivalent to the original slice(...).start/.stop dance.
                romsStart = min(romsLevel)
                romsStop = max(romsLevel) + 1
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, romsStart:romsStop, j0_u:j1_u, i0_u:i1_u]
                    romsNewVar = romsNewFile.createVariable('u', 'f', ('ocean_time', 's_rho', 'eta_u', 'xi_u'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'U-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, romsStart:romsStop, j0_u:j1_u, i0_u:i1_u]
                    romsNewVar[i,:,:,:] = romsRawVar
        elif not selectRomsBox and not selectRomsLevel:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, :, :, :]
                romsNewVar = romsNewFile.createVariable('u', 'f', ('ocean_time', 's_rho', 'eta_u', 'xi_u'), fill_value=romsFillVal)
                romsNewVar.long_name = 'U-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, :, :, :]
                romsNewVar[i,:,:] = romsRawVar
        elif selectRomsBox and not selectRomsLevel:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, :, j0_u:j1_u, i0_u:i1_u]
                romsNewVar = romsNewFile.createVariable('u', 'f', ('ocean_time', 's_rho', 'eta_u', 'xi_u'), fill_value=romsFillVal)
                romsNewVar.long_name = 'U-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, :, j0_u:j1_u, i0_u:i1_u]
                romsNewVar[i,:,:] = romsRawVar
        elif not selectRomsBox and selectRomsLevel:
            if len(romsLevel) == 1:
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, romsLevel, :, :]
                    romsNewVar = romsNewFile.createVariable('u', 'f', ('ocean_time', 'eta_u', 'xi_u'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'U-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, romsLevel, :, :]
                    romsNewVar[i,:,:] = romsRawVar
            else:
                romsStart = min(romsLevel)
                romsStop = max(romsLevel) + 1
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, romsStart:romsStop, :, :]
                    romsNewVar = romsNewFile.createVariable('u', 'f', ('ocean_time', 's_rho', 'eta_u', 'xi_u'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'U-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['u'][ntimes[0]+i, romsStart:romsStop, :, :]
                    romsNewVar[i,:,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS V-component (variable 'v') has been chosen.
# FIX: labels said 'U-wind Component' although this block reads and writes
# variable 'v' on the v-grid.
# NOTE(review): this block duplicates the earlier 'if romsV:' block; if both
# execute, createVariable('v') is called twice, which netCDF4 rejects --
# confirm which block is intended and remove the other.
if romsV:
    print('Working on ROMS V-wind Component.')
    bar = IncrementalBar(max=len(ntimes))
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        # Dead 'np.zeros' pre-allocations removed; units fixed to 'm s-1'.
        if selectRomsBox and selectRomsLevel:
            if len(romsLevel) == 1:
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsLevel, j0_v:j1_v, i0_v:i1_v]
                    romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'V-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsLevel, j0_v:j1_v, i0_v:i1_v]
                    romsNewVar[i,:,:] = romsRawVar
            else:
                romsStart = min(romsLevel)
                romsStop = max(romsLevel) + 1
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsStart:romsStop, j0_v:j1_v, i0_v:i1_v]
                    romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'V-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsStart:romsStop, j0_v:j1_v, i0_v:i1_v]
                    romsNewVar[i,:,:,:] = romsRawVar
        elif not selectRomsBox and not selectRomsLevel:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, :, :, :]
                romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                romsNewVar.long_name = 'V-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, :, :, :]
                romsNewVar[i,:,:] = romsRawVar
        elif selectRomsBox and not selectRomsLevel:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, :, j0_v:j1_v, i0_v:i1_v]
                romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                romsNewVar.long_name = 'V-wind Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, :, j0_v:j1_v, i0_v:i1_v]
                romsNewVar[i,:,:] = romsRawVar
        elif not selectRomsBox and selectRomsLevel:
            if len(romsLevel) == 1:
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsLevel, :, :]
                    romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'V-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsLevel, :, :]
                    romsNewVar[i,:,:] = romsRawVar
            else:
                romsStart = min(romsLevel)
                romsStop = max(romsLevel) + 1
                if i == np.argmin(ntimes):
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsStart:romsStop, :, :]
                    romsNewVar = romsNewFile.createVariable('v', 'f', ('ocean_time', 's_rho', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                    romsNewVar.long_name = 'V-wind Component'
                    romsNewVar.units = 'm s-1'
                    romsNewVar[i,:,:,:] = romsRawVar
                else:
                    romsRawVar = romsRawFile.variables['v'][ntimes[0]+i, romsStart:romsStop, :, :]
                    romsNewVar[i,:,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS Vertically Integrated U-momentum Component has been chosen.
if romsUbar:
    print('Working on ROMS Vertically Integrated U-momentum Component.')
    bar = IncrementalBar(max=len(ntimes))
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        if selectRomsBox:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['ubar'][ntimes[0]+i, j0_u:j1_u, i0_u:i1_u]
                # FIX: removed dead 'np.zeros' allocation (immediately
                # overwritten by createVariable).
                romsNewVar = romsNewFile.createVariable('ubar', 'f', ('ocean_time', 'eta_u', 'xi_u'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Vertically Integrated U-momentum Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['ubar'][ntimes[0]+i, j0_u:j1_u, i0_u:i1_u]
                romsNewVar[i,:,:] = romsRawVar
        else:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['ubar'][ntimes[0]+i, :, :]
                romsNewVar = romsNewFile.createVariable('ubar', 'f', ('ocean_time', 'eta_u', 'xi_u'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Vertically Integrated U-momentum Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['ubar'][ntimes[0]+i, :, :]
                romsNewVar[i,:,:] = romsRawVar
        bar.next()
    bar.finish()
# If ROMS Vertically Integrated V-momentum Component has been chosen.
if romsVbar:
    print('Working on ROMS Vertically Integrated V-momentum Component.')
    bar = IncrementalBar(max=len(ntimes))
    for i in range(np.argmin(ntimes), len(ntimes), 1):
        if selectRomsBox:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['vbar'][ntimes[0]+i, j0_v:j1_v, i0_v:i1_v]
                # FIX: 'vbar' was created on u-grid dims ('eta_u','xi_u');
                # the data is sliced on the v-grid, so the dimensions must
                # be ('eta_v','xi_v') or the sizes don't match.
                romsNewVar = romsNewFile.createVariable('vbar', 'f', ('ocean_time', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                # FIX: long_name said 'U-momentum' in this branch.
                romsNewVar.long_name = 'Vertically Integrated V-momentum Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['vbar'][ntimes[0]+i, j0_v:j1_v, i0_v:i1_v]
                romsNewVar[i,:,:] = romsRawVar
        else:
            if i == np.argmin(ntimes):
                romsRawVar = romsRawFile.variables['vbar'][ntimes[0]+i, :, :]
                romsNewVar = romsNewFile.createVariable('vbar', 'f', ('ocean_time', 'eta_v', 'xi_v'), fill_value=romsFillVal)
                romsNewVar.long_name = 'Vertically Integrated V-momentum Component'
                romsNewVar.units = 'm s-1'
                romsNewVar[i,:,:] = romsRawVar
            else:
                romsRawVar = romsRawFile.variables['vbar'][ntimes[0]+i, :, :]
                romsNewVar[i,:,:] = romsRawVar
        bar.next()
    bar.finish()
| 68.258607
| 215
| 0.475755
| 8,226
| 89,214
| 5.054826
| 0.037199
| 0.085616
| 0.112551
| 0.064934
| 0.925278
| 0.903177
| 0.881893
| 0.869219
| 0.86554
| 0.856545
| 0
| 0.014257
| 0.402482
| 89,214
| 1,307
| 216
| 68.258608
| 0.765772
| 0.026823
| 0
| 0.88245
| 1
| 0
| 0.090362
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001656
| false
| 0
| 0.004967
| 0
| 0.00745
| 0.028974
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4bd723b9202f82d8e384b1f7abfac81278e6ff17
| 2,880
|
py
|
Python
|
wechat_model/_generated_friend.py
|
Cologler/wechat-model-python
|
8d67fbf5db9d3d27428100246011c1113f418971
|
[
"MIT"
] | 1
|
2017-09-10T07:44:31.000Z
|
2017-09-10T07:44:31.000Z
|
wechat_model/_generated_friend.py
|
Cologler/wechat-model-python
|
8d67fbf5db9d3d27428100246011c1113f418971
|
[
"MIT"
] | null | null | null |
wechat_model/_generated_friend.py
|
Cologler/wechat-model-python
|
8d67fbf5db9d3d27428100246011c1113f418971
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from ._base import _BaseModel
class _Generated(_BaseModel):
    """Read-only accessor layer over a raw WeChat friend payload.

    Every attribute below is a property that forwards to
    ``_BaseModel._get`` with the corresponding raw payload key.
    """


# snake_case attribute name -> raw WeChat payload key.
_RAW_KEYS = {
    'city': 'City',
    'province': 'Province',
    'remark_pyinitial': 'RemarkPYInitial',
    'star_friend': 'StarFriend',
    'user_name': 'UserName',
    'display_name': 'DisplayName',
    'app_account_flag': 'AppAccountFlag',
    'hide_input_bar_flag': 'HideInputBarFlag',
    'member_count': 'MemberCount',
    'contact_flag': 'ContactFlag',
    'encry_chat_room_id': 'EncryChatRoomId',
    'head_img_flag': 'HeadImgFlag',
    'statues': 'Statues',
    'owner_uin': 'OwnerUin',
    'alias': 'Alias',
    'key_word': 'KeyWord',
    'signature': 'Signature',
    'chat_room_id': 'ChatRoomId',
    'sex': 'Sex',
    'remark_name': 'RemarkName',
    'is_owner': 'IsOwner',
    'uin': 'Uin',
    'nick_name': 'NickName',
    'attr_status': 'AttrStatus',
    'pyinitial': 'PYInitial',
    'uni_friend': 'UniFriend',
    'member_list': 'MemberList',
    'pyquan_pin': 'PYQuanPin',
    'head_img_url': 'HeadImgUrl',
    'web_wx_plugin_switch': 'WebWxPluginSwitch',
    'remark_pyquan_pin': 'RemarkPYQuanPin',
    'sns_flag': 'SnsFlag',
    'verify_flag': 'VerifyFlag',
}


def _make_getter(raw_key):
    # Bind raw_key per property (avoids the late-binding closure pitfall).
    def _getter(self):
        return self._get(raw_key)
    return _getter


# Install one read-only property per mapping entry, equivalent to the
# hand-written '@property def x(self): return self._get("X")' pattern.
for _attr, _raw in _RAW_KEYS.items():
    setattr(_Generated, _attr, property(_make_getter(_raw)))


class Friend(_Generated):
    pass
| 19.726027
| 56
| 0.613889
| 326
| 2,880
| 5.196319
| 0.303681
| 0.214286
| 0.272727
| 0.331169
| 0.268005
| 0.062574
| 0.031877
| 0
| 0
| 0
| 0
| 0.004247
| 0.264236
| 2,880
| 145
| 57
| 19.862069
| 0.795186
| 0.041319
| 0
| 0.320388
| 1
| 0
| 0.115105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.320388
| false
| 0.009709
| 0.009709
| 0.320388
| 0.669903
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
4be9099098716e5e4f86b06d7192d7a06f7c3903
| 7,012
|
py
|
Python
|
userbot/modules/quotly.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/quotly.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/quotly.py
|
oxyda-fox/XBot-Remix
|
3d97bea5395b223fc89a8cc6cb699cc624ccc967
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#Encript Marshal By XVenom
#https://github.com/xvenom15
import marshal
exec(marshal.loads(b'\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00@\x00\x00\x00s\n\x01\x00\x00d\x00d\x01l\x00Z\x00d\x00d\x01l\x01Z\x01d\x00d\x01l\x02Z\x02d\x00d\x01l\x03Z\x03d\x00d\x01l\x04Z\x04d\x00d\x02l\x05m\x06Z\x06\x01\x00d\x00d\x03l\x07m\x08Z\x08\x01\x00d\x00d\x04l\tm\nZ\n\x01\x00d\x00d\x05l\x04m\x0bZ\x0b\x01\x00d\x00d\x06l\x0cm\rZ\r\x01\x00d\x00d\x07l\x0em\x0fZ\x0fm\x10Z\x10m\x11Z\x11\x01\x00d\x00d\x08l\x12m\x13Z\x13\x01\x00d\td\tk\x02r\xd0d\nd\x0bd\x0cd\rd\x0ed\x0fd\x10d\x11d\x12d\x13d\x14d\x15d\x16d\x17d\x18d\x19d\x1ad\x1bd\x1c\x9c\x12Z\x14d\x1dd\x1ed\x1fd d!d"d#d$g\x07d d%\x9c\x03Z\x15e\x13d&d\'d(\x8d\x02d)d*\x84\x00\x83\x01Z\x16e\x13d&d+d(\x8d\x02d,d-\x84\x00\x83\x01Z\x17e\x10\xa0\x18d.d/i\x01\xa1\x01\x01\x00d\x01S\x00)0\xe9\x00\x00\x00\x00N)\x01\xda\x0cTimeoutError)\x01\xda\x05Image)\x01\xda\x07BytesIO)\x01\xda\x06events)\x01\xda\x13YouBlockedUserError)\x03\xda\x03bot\xda\x08CMD_HELP\xda\x10QUOTES_API_TOKEN)\x01\xda\x08register\xe9\x01\x00\x00\x00Z\x06Quotesz\x19API Key/Token for Quotes.z\x13API URL for Quotes.z\x0fUsername colorsz\x1fDefault color for the username.z\x1eYou didn\'t reply to a message.z You didn\'t specify the template.z\x0f</code>, <code>z)Server error. Please report to developer.zCYou\'ve set an invalid token, get it from `http://antiddos.systems`.z\x1fYou\'re unauthorized to do this.z1Wrong template. 
You can use only the default one.z$Available Templates: <code>{}</code>z&You cannot send stickers in this chat.\xda\x05admin\xda\x07creator\xda\x06hiddenZ\x07Channel)\x12\xda\x04nameZ\x11api_token_cfg_docZ\x0fapi_url_cfg_docZ\x0ecolors_cfg_docZ\x1edefault_username_color_cfg_docZ\x08no_replyZ\x0bno_templateZ\tdelimiterZ\x0cserver_errorZ\rinvalid_tokenZ\x0cunauthorizedZ\x16not_enough_permissionsZ\ttemplatesZ\x14cannot_send_stickersr\x0c\x00\x00\x00r\r\x00\x00\x00r\x0e\x00\x00\x00Z\x07channelz\x1bhttp://api.antiddos.systemsz\x07#fb6169z\x07#faa357z\x07#b48bf2z\x07#85de85z\x07#62d4e3z\x07#65bdf3z\x07#ff5694)\x03Z\x07api_urlZ\x0fusername_colorsZ\x16default_username_colorTz\x04^\\.q)\x02Z\x08outgoingZ\x07patternc\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x0b\x00\x00\x00\xc3\x00\x00\x00s\xbc\x01\x00\x00|\x00j\x00r\nd\x00S\x00|\x00j\x01s |\x00\xa0\x02d\x01\xa1\x01I\x00d\x00H\x00S\x00|\x00\xa0\x03\xa1\x00I\x00d\x00H\x00}\x01|\x01j\x04sD|\x00\xa0\x02d\x02\xa1\x01I\x00d\x00H\x00S\x00d\x03}\x02|\x01j\x05j\x06r`|\x00\xa0\x02d\x01\xa1\x01I\x00d\x00H\x00S\x00|\x00\xa0\x02d\x04\xa1\x01I\x00d\x00H\x00\x01\x00\x90\x01z 
t\x06\xa0\x07|\x02\xa1\x014\x00I\x00d\x00H\x00\x90\x00\x9a\xfc}\x03zF|\x03\xa0\x08t\tj\nd\x05d\x06d\x07\x8d\x02\xa1\x01}\x04t\x06\xa0\x0b|\x02|\x01\xa1\x02I\x00d\x00H\x00}\x05|\x04I\x00d\x00H\x00}\x04t\x06\xa0\x0c|\x03j\r\xa1\x01I\x00d\x00H\x00\x01\x00W\x00n:\x04\x00t\x0ek\n\x90\x01r\n\x01\x00\x01\x00\x01\x00|\x00\xa0\x0fd\x08\xa1\x01I\x00d\x00H\x00\x06\x00Y\x00W\x00\x02\x005\x00Q\x00I\x00d\x00H\x00R\x00\xa3\x00W\x00S\x00X\x00|\x04j\x04\xa0\x10d\t\xa1\x01\x90\x01r,|\x00\xa0\x02d\n\xa1\x01I\x00d\x00H\x00\x01\x00nT|\x00\xa0\x11\xa1\x00I\x00d\x00H\x00\x01\x00t\x06\xa0\x0b|\x00j\r|\x04j\x12\xa1\x02I\x00d\x00H\x00\x01\x00t\x06\xa0\x0c|\x00j\r\xa1\x01I\x00d\x00H\x00\x01\x00|\x00j\x13\xa0\x14|\x03j\r|\x05j\x15|\x04j\x15g\x02\xa1\x02I\x00d\x00H\x00\x01\x00W\x005\x00Q\x00I\x00d\x00H\x00R\x00X\x00W\x00n$\x04\x00t\x16k\n\x90\x01r\xb6\x01\x00\x01\x00\x01\x00|\x00\xa0\x02\xa1\x00I\x00d\x00H\x00\x01\x00Y\x00n\x02X\x00d\x00S\x00)\x0bNz\x1e```Balas di Pesan Goblok!!.```z\x1d```Balas di Pesan Goblok!!```z\n@QuotLyBotz\x1b```Membuat Sticker......```TicY\x82=)\x02Z\x08incomingZ\nfrom_usersz-```Please unblock @QuotLyBot and try again```z\x03Hi!zD```Can you kindly disable your forward privacy settings for 
good?```)\x17\xda\x08fwd_fromZ\x0freply_to_msg_id\xda\x04editZ\x11get_reply_message\xda\x04textZ\x06senderr\x07\x00\x00\x00Z\x0cconversationZ\nwait_eventr\x05\x00\x00\x00Z\nNewMessageZ\x10forward_messagesZ\x15send_read_acknowledgeZ\x07chat_idr\x06\x00\x00\x00Z\x05reply\xda\nstartswith\xda\x06delete\xda\x07messageZ\x06clientZ\x0fdelete_messages\xda\x02idr\x02\x00\x00\x00)\x06Z\x05qotliZ\rreply_messageZ\x04chatZ\x04conv\xda\x08response\xda\x03msg\xa9\x00r\x19\x00\x00\x00\xda\x00\xda\x07quotess1\x00\x00\x00sF\x00\x00\x00\x00\x02\x06\x01\x04\x01\x06\x01\x10\x01\x0e\x01\x06\x01\x10\x01\x04\x01\x08\x01\x10\x01\x10\x01\x04\x01\x16\x01\x02\x01\x04\x01\x04\x01\x02\x01\x02\xfe\x04\xff\x04\x04\x12\x01\n\x02\x16\x01\x10\x01*\x01\x0e\x01\x12\x02\x0e\x01\x16\x01\x12\x02\n\x01\n\xff\x1e\x02\x10\x01r\x1b\x00\x00\x00z\x12^.quote(?: |$)(.*)c\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x06\x00\x00\x00\xc3\x00\x00\x00s\xbe\x00\x00\x00|\x00j\x00r\nd\x00S\x00|\x00\xa0\x01d\x01\xa1\x01I\x00d\x00H\x00\x01\x00|\x00j\x02\xa0\x03d\x02\xa1\x01}\x01d\x03\xa0\x04|\x01\xa1\x01}\x02d\x04d\x05i\x01}\x03z\x16t\x05j\x06|\x02|\x03d\x06\x8d\x02\xa0\x07\xa1\x00}\x04W\x00n\x10\x01\x00\x01\x00\x01\x00d\x00}\x04Y\x00n\x02X\x00|\x04d\x00k\tr\x80t\x08\xa0\t|\x04\xa1\x01\xa0\x06d\x07\xa1\x01\xa0\x06d\x08\xa1\x01}\x05n\x04d\x00}\x05|\x05r\xaa|\x00\xa0\x01|\x05\xa0\nd\td\n\xa1\x02\xa0\nd\x0bd\n\xa1\x02\xa1\x01I\x00d\x00H\x00\x01\x00n\x10|\x00\xa0\x01d\x0c\xa1\x01I\x00d\x00H\x00\x01\x00d\x00S\x00)\rNz\rProcessing...r\x0b\x00\x00\x00z>https://bots.shrimadhavuk.me/Telegram/GoodReadsQuotesBot/?q={}z\nUSER-AGENTZ\x07UniBorg)\x01\xda\x07headersZ\x15input_message_contentZ\x0cmessage_textz\x06<code>\xfa\x01`z\x07</code>z\x12Zero results 
found)\x0br\x10\x00\x00\x00r\x11\x00\x00\x00Z\rpattern_match\xda\x05group\xda\x06format\xda\x08requests\xda\x03get\xda\x04json\xda\x06randomZ\x06choice\xda\x07replace)\x06Z\x05eventZ\rsearch_stringZ\tinput_urlr\x1c\x00\x00\x00r\x17\x00\x00\x00\xda\x06resultr\x19\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00\xda\x0cquote_searchY\x00\x00\x00s \x00\x00\x00\x00\x02\x06\x01\x04\x01\x10\x01\x0c\x01\n\x01\x08\x01\x02\x01\x16\x01\x06\x01\n\x01\x08\x01\x18\x02\x04\x01\x04\x01"\x02r&\x00\x00\x00Z\x06quotlyzU`.q`\nUsage: Enhance ur text to sticker.\n\n`.quote`\nUsage: Enhance ur text to stickers.)\x19r#\x00\x00\x00r \x00\x00\x00\xda\x06base64r"\x00\x00\x00Z\x08telethonZ\x12asyncio.exceptionsr\x02\x00\x00\x00Z\x03PILr\x03\x00\x00\x00\xda\x02ior\x04\x00\x00\x00r\x05\x00\x00\x00Z\x1ctelethon.errors.rpcerrorlistr\x06\x00\x00\x00Z\x07userbotr\x07\x00\x00\x00r\x08\x00\x00\x00r\t\x00\x00\x00Z\x0euserbot.eventsr\n\x00\x00\x00Z\x07stringsZ\x06configr\x1b\x00\x00\x00r&\x00\x00\x00\xda\x06updater\x19\x00\x00\x00r\x19\x00\x00\x00r\x19\x00\x00\x00r\x1a\x00\x00\x00\xda\x08<module>\x08\x00\x00\x00s^\x00\x00\x00\x08\x01\x08\x01\x08\x01\x08\x01\x08\x01\x0c\x02\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x14\x01\x0c\x02\x08\x02\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\x01\x02\xee\x06\x15\x02\x01\x08\x01\x02\x00\x02\x00\x02\xff\x02\x02\x02\xfd\x06\x05\n\x01\n\'\n\x01\n\x15\x04\x01\x02\x01\x02\xfe'))
| 1,753
| 6,941
| 0.780376
| 1,395
| 7,012
| 3.888172
| 0.301075
| 0.127212
| 0.089602
| 0.073009
| 0.264196
| 0.216999
| 0.165929
| 0.123156
| 0.096055
| 0.086099
| 0
| 0.288133
| 0.014546
| 7,012
| 4
| 6,941
| 1,753
| 0.496816
| 0.007416
| 0
| 0
| 0
| 1
| 0.134646
| 0.11151
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
ef18fc86715d9ab2d382322cc7ce5599f25299d5
| 120
|
py
|
Python
|
m_i_a_test.py
|
clicktime-michael/mikeisawesome
|
8fd255de48dbad33dfc7470d002b32a7d20a59b8
|
[
"MIT"
] | null | null | null |
m_i_a_test.py
|
clicktime-michael/mikeisawesome
|
8fd255de48dbad33dfc7470d002b32a7d20a59b8
|
[
"MIT"
] | null | null | null |
m_i_a_test.py
|
clicktime-michael/mikeisawesome
|
8fd255de48dbad33dfc7470d002b32a7d20a59b8
|
[
"MIT"
] | null | null | null |
import michael_is_awesome;
def test_michael_is_awesome():
assert michael_is_awesome.m_i_a() == "Michael is Awesome!"
| 24
| 60
| 0.791667
| 19
| 120
| 4.526316
| 0.526316
| 0.418605
| 0.744186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108333
| 120
| 4
| 61
| 30
| 0.803738
| 0
| 0
| 0
| 0
| 0
| 0.158333
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ef216d03f370b4f27abb746d734858e299434dae
| 31,237
|
py
|
Python
|
electrum/tests/test_transaction.py
|
commerceblock/cb-electrum-wallet
|
a8bb2999ec99fced56311be99a3a4ffabbc14c23
|
[
"MIT"
] | 2
|
2020-05-04T05:42:05.000Z
|
2020-08-01T11:22:30.000Z
|
electrum/tests/test_transaction.py
|
commerceblock/cb-client-wallet
|
a8bb2999ec99fced56311be99a3a4ffabbc14c23
|
[
"MIT"
] | 79
|
2019-04-03T06:56:46.000Z
|
2019-10-11T17:56:43.000Z
|
electrum/tests/test_transaction.py
|
commerceblock/guardnode-wallet
|
dc590742697f335637348513a13347bd0974bc1d
|
[
"MIT"
] | 2
|
2020-05-04T05:48:51.000Z
|
2021-03-25T14:46:25.000Z
|
import unittest
from electrum import transaction
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.keystore import xpubkey_to_address
from electrum.util import bh2u, bfh
from . import SequentialTestCase, TestCaseForTestnet
from .test_bitcoin import needs_test_with_all_ecc_implementations
unsigned_blob = '45505446ff00010000000001f8ddeb9a69819ed38bec0d121a241bbf2465f61263068881a28cc2b71c4b5525010000005701ff4c53ff0488b21e000000000000000000350138c626aac760ea9eedb47287f12c4d783910821c5602d5f8ed933a8f0d95025fb1f45ecb87f2089dc8b0257fc23cc5fd13ae9d4e14c08b0398002d68eae14c00000000feffffff0301a41dc2f5b4e17ec90d88808ff7a4e54e53acce037ff51c093d3f1f57fafd18670100005af3107a4000001976a9140210e63973f9feddf155e5e73ac8f7289549b5f788ac01a41dc2f5b4e17ec90d88808ff7a4e54e53acce037ff51c093d3f1f57fafd18670100016bcc41e8793c001976a9149e327995acc97229c07ce5e75789dab5eb3b689188ac01a41dc2f5b4e17ec90d88808ff7a4e54e53acce037ff51c093d3f1f57fafd18670100000000000086c4000003000000'
signed_blob = '010000000001f8ddeb9a69819ed38bec0d121a241bbf2465f61263068881a28cc2b71c4b5525010000006b483045022100c055b7b07847ee98bce64b22058356efca5b81f8a69f8c2b285669081c58361c02202d14691a6909888fc09e6fb2ab37949de87e0c7d1e72db10d6a2bfbec35fe61b0121031ec67b31750c9ca58b859200267625681d4c9849f8fb163207c4186a273e0b0afeffffff0301a41dc2f5b4e17ec90d88808ff7a4e54e53acce037ff51c093d3f1f57fafd18670100005af3107a4000001976a9140210e63973f9feddf155e5e73ac8f7289549b5f788ac01a41dc2f5b4e17ec90d88808ff7a4e54e53acce037ff51c093d3f1f57fafd18670100016bcc41e8793c001976a9149e327995acc97229c07ce5e75789dab5eb3b689188ac01a41dc2f5b4e17ec90d88808ff7a4e54e53acce037ff51c093d3f1f57fafd18670100000000000086c4000003000000'
signed_blob_signatures = ['3045022100c055b7b07847ee98bce64b22058356efca5b81f8a69f8c2b285669081c58361c02202d14691a6909888fc09e6fb2ab37949de87e0c7d1e72db10d6a2bfbec35fe61b01',]
v2_blob = "0200000000026d88e03db6f5537a1e8ab5e6f5629b9bd3d8cd202ebdd957b2082190b7aecf9e000000006a473044022008430c1563591de0313db6fcbb9bbc1314bc4782ae18cbc4b69fec65a5843a160220079efb70719c75e307f0ac2f7cce8ebd3bb3d4a79eccb7b1fe58df1a0e81f15b0121025980f0aa6b634c1a2c8ae2b01aa257669f436c740ca392a61120e69fc478774bfeffffff6d88e03db6f5537a1e8ab5e6f5629b9bd3d8cd202ebdd957b2082190b7aecf9e010000006a47304402203b7407baee09f20013856e682656fd3b6d7444eddaee40130eaa1d8dddf2dcce02202264c5de2f1422a89d22b3ade2dae0162ea0bc0489384bc94285aeca2c801dc90121021fe5af011813507148fd6b55e1aee4b5e316dada54c4cb448a0839e2a6d55428feffffff0401613d2c1a8ff549ce716a749f5e8e2b123ae1b4b7661bd3a2d731609dada0ff3b0100038d7e8ceefc00001976a914a017fc5aefbcf6cd57044b90c3d85cfbec95c72888ac01613d2c1a8ff549ce716a749f5e8e2b123ae1b4b7661bd3a2d731609dada0ff3b010000000017d78400001976a9140217928daaa582b55e07363cd88a998ab167812088ac0190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a524458001976a91450fc2d2d68e3224e8334ac469f0a2cf6928dd3ca88ac0190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d9724659010000000000002b98000000000000"
signed_segwit_blob = "0200000001010000000000000000000000000000000000000000000000000000000000000000ffffffff03520101ffffffff020190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d9724659010000000000060ab80001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000000000000000266a24aa21a9ed818007e5b371ffd2ddaf01a00a017ac309b1f0dd184fac749babd10505496e8e000000000000012000000000000000000000000000000000000000000000000000000000000000000000000000"
class TestBCDataStream(SequentialTestCase):
def test_compact_size(self):
s = transaction.BCDataStream()
values = [0, 1, 252, 253, 2**16-1, 2**16, 2**32-1, 2**32, 2**64-1]
for v in values:
s.write_compact_size(v)
with self.assertRaises(transaction.SerializationError):
s.write_compact_size(-1)
self.assertEqual(bh2u(s.input),
'0001fcfdfd00fdfffffe00000100feffffffffff0000000001000000ffffffffffffffffff')
for v in values:
self.assertEqual(s.read_compact_size(), v)
with self.assertRaises(transaction.SerializationError):
s.read_compact_size()
def test_string(self):
s = transaction.BCDataStream()
with self.assertRaises(transaction.SerializationError):
s.read_string()
msgs = ['Hello', ' ', 'World', '', '!']
for msg in msgs:
s.write_string(msg)
for msg in msgs:
self.assertEqual(s.read_string(), msg)
with self.assertRaises(transaction.SerializationError):
s.read_string()
def test_bytes(self):
s = transaction.BCDataStream()
s.write(b'foobar')
self.assertEqual(s.read_bytes(3), b'foo')
self.assertEqual(s.read_bytes(2), b'ba')
self.assertEqual(s.read_bytes(4), b'r')
self.assertEqual(s.read_bytes(1), b'')
class TestTransaction(SequentialTestCase):
@needs_test_with_all_ecc_implementations
def test_tx_unsigned(self):
self.maxDiff = None
expected = {
'inputs': [{
'type': 'p2pkh',
'address': '14iRdacqJ95JffkUFUTUoZmHCUkq21UMAZ',
'issuance': None,
'num_sig': 1,
'prevout_hash': '25554b1cb7c28ca28188066312f66524bf1b241a120dec8bd39e81699aebddf8',
'prevout_n': 1,
'pubkeys': ['031ec67b31750c9ca58b859200267625681d4c9849f8fb163207c4186a273e0b0a'],
'scriptSig': '01ff4c53ff0488b21e000000000000000000350138c626aac760ea9eedb47287f12c4d783910821c5602d5f8ed933a8f0d95025fb1f45ecb87f2089dc8b0257fc23cc5fd13ae9d4e14c08b0398002d68eae14c00000000',
'sequence': 4294967294,
'signatures': [None],
'x_pubkeys': ['ff0488b21e000000000000000000350138c626aac760ea9eedb47287f12c4d783910821c5602d5f8ed933a8f0d95025fb1f45ecb87f2089dc8b0257fc23cc5fd13ae9d4e14c08b0398002d68eae14c00000000']}],
'lockTime': 3,
'outputs': [{
'address': '1BvbZykUE5oS5ACH5U4mhwE5KdJPHson7',
'asset': '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4',
'asset_version': 1,
'nonce': None,
'nonce_version': 0,
'prevout_n': 0,
'scriptPubKey': '76a9140210e63973f9feddf155e5e73ac8f7289549b5f788ac',
'range_proof': None,
'surjection_proof': None,
'type': TYPE_ADDRESS,
'value': 100000000000000,
'value_version': 1},
{
'address': '1FRUENS6LR8JdwEoptZwjRA1c64WDgcsac',
'asset': '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4',
'asset_version': 1,
'nonce': None,
'nonce_version': 0,
'prevout_n': 1,
'scriptPubKey': '76a9149e327995acc97229c07ce5e75789dab5eb3b689188ac',
'range_proof': None,
'surjection_proof': None,
'type': TYPE_ADDRESS,
'value': 399999999965500,
'value_version': 1},
{
'address': '',
'asset': '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4',
'asset_version': 1,
'nonce': None,
'nonce_version': 0,
'prevout_n': 2,
'scriptPubKey': '',
'range_proof': None,
'surjection_proof': None,
'type': TYPE_SCRIPT,
'value': 34500,
'value_version': 1}],
'partial': True,
'segwit_ser': False,
'version': 1,
}
tx = transaction.Transaction(unsigned_blob)
self.assertEqual(tx.deserialize(), expected)
self.assertEqual(tx.deserialize(), None)
self.assertEqual(tx.as_dict(), {'hex': unsigned_blob, 'complete': False, 'final': True})
self.assertEqual(tx.get_outputs(), [('1BvbZykUE5oS5ACH5U4mhwE5KdJPHson7', 100000000000000, '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4'), ('1FRUENS6LR8JdwEoptZwjRA1c64WDgcsac', 399999999965500, '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4'), ('SCRIPT ', 34500, '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4')])
self.assertEqual(tx.get_output_addresses(), ['1BvbZykUE5oS5ACH5U4mhwE5KdJPHson7', '1FRUENS6LR8JdwEoptZwjRA1c64WDgcsac', 'SCRIPT '])
self.assertTrue(tx.has_address('1BvbZykUE5oS5ACH5U4mhwE5KdJPHson7'))
self.assertTrue(tx.has_address('1FRUENS6LR8JdwEoptZwjRA1c64WDgcsac'))
self.assertFalse(tx.has_address('1FRUENS6LR8JdwEoptZwjRA1c64WDgcsab'))
self.assertEqual(tx.serialize(), unsigned_blob)
tx.update_signatures(signed_blob_signatures)
self.assertEqual(tx.raw, signed_blob)
tx.update(unsigned_blob)
tx.raw = None
blob = str(tx)
self.assertEqual(transaction.deserialize(blob), expected)
@needs_test_with_all_ecc_implementations
def test_tx_signed(self):
self.maxDiff=None
expected = {
'inputs': [{
'type': 'unknown',
'address': None,
'issuance': None,
'num_sig': 0,
'prevout_hash': '25554b1cb7c28ca28188066312f66524bf1b241a120dec8bd39e81699aebddf8',
'prevout_n': 1,
'scriptSig': '483045022100c055b7b07847ee98bce64b22058356efca5b81f8a69f8c2b285669081c58361c02202d14691a6909888fc09e6fb2ab37949de87e0c7d1e72db10d6a2bfbec35fe61b0121031ec67b31750c9ca58b859200267625681d4c9849f8fb163207c4186a273e0b0a',
'sequence': 4294967294}],
'lockTime': 3,
'outputs': [
{
'address': '1BvbZykUE5oS5ACH5U4mhwE5KdJPHson7',
'asset': '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4',
'asset_version': 1,
'nonce': None,
'nonce_version': 0,
'prevout_n': 0,
'scriptPubKey': '76a9140210e63973f9feddf155e5e73ac8f7289549b5f788ac',
'range_proof': None,
'surjection_proof': None,
'type': TYPE_ADDRESS,
'value': 100000000000000,
'value_version': 1},
{
'address': '1FRUENS6LR8JdwEoptZwjRA1c64WDgcsac',
'asset': '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4',
'asset_version': 1,
'nonce': None,
'nonce_version': 0,
'prevout_n': 1,
'scriptPubKey': '76a9149e327995acc97229c07ce5e75789dab5eb3b689188ac',
'range_proof': None,
'surjection_proof': None,
'type': TYPE_ADDRESS,
'value': 399999999965500,
'value_version': 1},
{
'address': '',
'asset': '6718fdfa571f3f3d091cf57f03ceac534ee5a4f78f80880dc97ee1b4f5c21da4',
'asset_version': 1,
'nonce': None,
'nonce_version': 0,
'prevout_n': 2,
'scriptPubKey': '',
'range_proof': None,
'surjection_proof': None,
'type': TYPE_SCRIPT,
'value': 34500,
'value_version': 1},
],
'partial': False,
'segwit_ser': False,
'version': 1,
}
tx = transaction.Transaction(signed_blob)
self.assertEqual(tx.deserialize(), expected)
self.assertEqual(tx.deserialize(), None)
self.assertEqual(tx.as_dict(), {'hex': signed_blob, 'complete': True, 'final': True})
self.assertEqual(tx.serialize(), signed_blob)
tx.update_signatures(signed_blob_signatures)
self.assertEqual(tx.estimated_total_size(), 341)
self.assertEqual(tx.estimated_base_size(), 341)
self.assertEqual(tx.estimated_witness_size(), 0)
self.assertEqual(tx.estimated_weight(), 1364)
self.assertEqual(tx.estimated_size(), 341)
def test_estimated_output_size(self):
estimated_output_size = transaction.Transaction.estimated_output_size
self.assertEqual(estimated_output_size('14gcRovpkCoGkCNBivQBvw7eso7eiNAbxG'), 34)
self.assertEqual(estimated_output_size('35ZqQJcBQMZ1rsv8aSuJ2wkC7ohUCQMJbT'), 32)
self.assertEqual(estimated_output_size('bc1q3g5tmkmlvxryhh843v4dz026avatc0zzr6h3af'), 31)
self.assertEqual(estimated_output_size('bc1qnvks7gfdu72de8qv6q6rhkkzu70fqz4wpjzuxjf6aydsx7wxfwcqnlxuv3'), 43)
# TODO other tests for segwit tx
def test_tx_signed_segwit(self):
tx = transaction.Transaction(signed_segwit_blob)
self.assertEqual(tx.estimated_total_size(), 223)
self.assertEqual(tx.estimated_base_size(), 182)
self.assertEqual(tx.estimated_witness_size(), 41)
self.assertEqual(tx.estimated_weight(), 769)
self.assertEqual(tx.estimated_size(), 193)
def test_errors(self):
with self.assertRaises(TypeError):
transaction.Transaction.pay_script(output_type=None, addr='')
with self.assertRaises(BaseException):
xpubkey_to_address('')
def test_parse_xpub(self):
res = xpubkey_to_address('fe4e13b0f311a55b8a5db9a32e959da9f011b131019d4cebe6141b9e2c93edcbfc0954c358b062a9f94111548e50bde5847a3096b8b7872dcffadb0e9579b9017b01000200')
self.assertEqual(res, ('04ee98d63800824486a1cf5b4376f2f574d86e0a3009a6448105703453f3368e8e1d8d090aaecdd626a45cc49876709a3bbb6dc96a4311b3cac03e225df5f63dfc', '19h943e4diLc68GXW7G75QNe2KWuMu7BaJ'))
def test_version_field(self):
tx = transaction.Transaction(v2_blob)
self.assertEqual(tx.txid(), "7201a219a30af1303e4c17ab15a02e2d9c6fbfcd162403d5d171f293fa7901ce")
def test_get_address_from_output_script(self):
# the inverse of this test is in test_bitcoin: test_address_to_script
addr_from_script = lambda script: transaction.get_address_from_output_script(bfh(script))
ADDR = transaction.TYPE_ADDRESS
# bech32 native segwit
# test vectors from BIP-0173
self.assertEqual((ADDR, 'bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4'), addr_from_script('0014751e76e8199196d454941c45d1b3a323f1433bd6'))
self.assertEqual((ADDR, 'bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx'), addr_from_script('5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6'))
self.assertEqual((ADDR, 'bc1sw50qa3jx3s'), addr_from_script('6002751e'))
self.assertEqual((ADDR, 'bc1zw508d6qejxtdg4y5r3zarvaryvg6kdaj'), addr_from_script('5210751e76e8199196d454941c45d1b3a323'))
# base58 p2pkh
self.assertEqual((ADDR, '14gcRovpkCoGkCNBivQBvw7eso7eiNAbxG'), addr_from_script('76a91428662c67561b95c79d2257d2a93d9d151c977e9188ac'))
self.assertEqual((ADDR, '1BEqfzh4Y3zzLosfGhw1AsqbEKVW6e1qHv'), addr_from_script('76a914704f4b81cadb7bf7e68c08cd3657220f680f863c88ac'))
self.assertEqual((ADDR, '18u8VTYhogvwek9rUQRtHKn66Sf6a2RV5w'), addr_from_script('76a91456a4c36cd1fdb71a493fec9941b69b4a7cec90ea88ac'))
# base58 p2sh
self.assertEqual((ADDR, '35ZqQJcBQMZ1rsv8aSuJ2wkC7ohUCQMJbT'), addr_from_script('a9142a84cf00d47f699ee7bbc1dea5ec1bdecb4ac15487'))
self.assertEqual((ADDR, '3PyjzJ3im7f7bcV724GR57edKDqoZvH7Ji'), addr_from_script('a914f47c8954e421031ad04ecd8e7752c9479206b9d387'))
#####
def _run_naive_tests_on_tx(self, raw_tx, txid):
tx = transaction.Transaction(raw_tx)
self.assertEqual(txid, tx.txid())
self.assertEqual(raw_tx, tx.serialize())
self.assertTrue(tx.estimated_size() >= 0)
def test_txid_ocean_1(self):
raw_tx = '020000000003709afab77f27e44f86ba8fbe98eae07bf5a2c789aba842c1f5074571f3ca01e8000000006a473044022043909dcd53d1f29cbe85379d613aaed4365b8c2caefac19710835befa246ac9b022071f0b9535f40f2302ec3b078658779a6bb90d1bf1335c57c2665e1b614cdca6f012102877b934f94f2a3526f6f8d3463200ed18d0db3805ddfa9b95c49b8f3b4c5f9f5feffffff9929d283448860b9c8f878f72b9498740ce5eac04c6f889c928e919ddad9e334000000006a47304402207f7a3e18b7c1cd23faf84113f9e3529efbd905bb769bdae89bbb0ba48bebdde702204eefc801b4a66d0b53b8a1593f5b9b22263b6990938b35c11f222dd8065487c2012103cecc35686372cced9792776ab3894686252b6db17892d7821ac6dc889e578ed1feffffff9929d283448860b9c8f878f72b9498740ce5eac04c6f889c928e919ddad9e334010000006a47304402203bd5fbc2c4c24e3c8dacb2cb88b4d250453a437c91ff57533b8132b0993c26f202205440a629830ad74b6723ee09bb595ab52a52b4bab1f9a2280ce01833b2e4b91c01210218868f2c88bbc2b83897e16f49e7175c2acb77f8afe48f3a01b65d975f3a7cc1feffffff0401ed8c7bbf7a3d0bca6f342917b5ee3230e449d679371e1253c9098f7a49edad2a010000000017d78400001976a91459a23f87599b762aac26ff3bdcb35a4a6fb2431588ac0190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a51e198001976a914e99690c5b0e28056f91d50c0edf77cea3fd411a088ac01ed8c7bbf7a3d0bca6f342917b5ee3230e449d679371e1253c9098f7a49edad2a010000000029b92700001976a9140d90d12a0d6da5f948fc8cfc2e8224af5c8fa4d588ac0190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d9724659010000000000003728000002000000'
txid = 'c4e6658adf0bb20ec82cc295723ff5a5b6531460b04048a6b023f496902a44a3'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_coinbase_segwit_ocean_2(self):
raw_tx = '0200000001010000000000000000000000000000000000000000000000000000000000000000ffffffff03520101ffffffff020190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d9724659010000000000060ab80001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000000000000000266a24aa21a9ed818007e5b371ffd2ddaf01a00a017ac309b1f0dd184fac749babd10505496e8e000000000000012000000000000000000000000000000000000000000000000000000000000000000000000000'
txid = '55620ef3fddaa94eff3ea160f54e167b11a80d662d4ee26bf53c3fa28b647589'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_issuance_ocean_3(self):
raw_tx = '020000000001ca983f7957320e7721424a10335ffdd7cb13b564eb5dca3b296b11e1d0e8ae0a1e00008000feffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100038d7ea4c6800000030190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a526ff0001976a914b6abccfdb3e6c6a7f2e60e691ecbf480d3349c3e88ac01613d2c1a8ff549ce716a749f5e8e2b123ae1b4b7661bd3a2d731609dada0ff3b0100038d7ea4c68000001976a914c2a33ae4acdef0a30fa15efbfbbc77989d3dd97988ac0190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d9724659010000000000001810000000000000'
txid = '9ecfaeb7902108b257d9bd2e20cdd8d39b9b62f5e6b58a1e7a53f5b63de0886d'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_initial_issuance_ocean_4(self):
raw_tx = '0100000000013db482a0a84809ef146d2dfd133a6d7028116e0c3d502c0f9a5472b157b8eecd0000008000ffffffff000000000000000000000000000000000000000000000000000000000000000006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f0100002d79883d2000010000000000000000640190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050a
ca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf
5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d9
72465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a52
88000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a5288000001510190f6212d141349050aca026eeb6e53a037bfaf5e0383deae7b9a5139d972465901000000746a52880000015100000000'
txid = '0aaee8d0e1116b293bca5deb64b513cbd7fd5f33104a4221770e3257793f98ca'
self._run_naive_tests_on_tx(raw_tx, txid)
class TestTransactionTestnet(TestCaseForTestnet):

    def _run_naive_tests_on_tx(self, raw_tx, txid):
        """Parse *raw_tx* and sanity-check txid, round-trip serialization, and size."""
        parsed = transaction.Transaction(raw_tx)
        self.assertEqual(txid, parsed.txid())
        self.assertEqual(raw_tx, parsed.serialize())
        self.assertTrue(parsed.estimated_size() >= 0)

    # partial txns using our partial format --->
    # NOTE: our partial format contains xpubs, and xpubs have version bytes,
    # and version bytes encode the network as well; so these are network-sensitive!
    '''
def test_txid_partial_segwit_p2wpkh(self):
raw_tx = '45505446ff000100000000010115a847356cbb44be67f345965bb3f2589e2fec1c9a0ada21fd28225dcc602e8f0100000000fdffffff02f6fd1200000000001600149c756aa33f4f89418b33872a973274b5445c727b80969800000000001600140f9de573bc679d040e763d13f0250bd03e625f6ffeffffffff9095ab000000000000000201ff53ff045f1cf6014af5fa07800000002fa3f450ba41799b9b62642979505817783a9b6c656dc11cd0bb4fa362096808026adc616c25a4d0a877d1741eb1db9cef65c15118bd7d5f31bf65f319edda81840100c8000f391400'
txid = '63ff7e99d85d8e33f683e6ec84574bdf8f5111078a5fe900893e019f9a7f95c3'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_partial_segwit_p2wpkh_p2sh_simple(self):
raw_tx = '45505446ff0001000000000101d0d23a6fbddb21cc664cb81cca96715baa4d6dbe5b7b9bcc6632f1005a7b0b840100000017160014a78a91261e71a681b6312cd184b14503a21f856afdffffff0134410f000000000017a914d6514ca17ecc31952c990daf96e307fbc58529cd87feffffffff40420f000000000000000201ff53ff044a5262033601222e800000001618aa51e49a961f63fd111f64cd4a7e792c1d7168be7a07703de505ebed2cf70286ebbe755767adaa5835f4d78dec1ee30849d69eacfe80b7ee6b1585279536c30000020011391400'
txid = '2739f2e7fde9b8ec73fce4aee53722cc7683312d1321ded073284c51fadf44df'
self._run_naive_tests_on_tx(raw_tx, txid)
def test_txid_partial_segwit_p2wpkh_p2sh_mixed_outputs(self):
raw_tx = '45505446ff00010000000001011dcac788f24b84d771b60c44e1f9b6b83429e50f06e1472d47241922164013b00100000017160014801d28ca6e2bde551112031b6cb75de34f10851ffdffffff0440420f00000000001600140f9de573bc679d040e763d13f0250bd03e625f6fc0c62d000000000017a9142899f6484e477233ce60072fc185ef4c1f2c654487809698000000000017a914d40f85ba3c8fa0f3615bcfa5d6603e36dfc613ef87712d19040000000017a914e38c0cffde769cb65e72cda1c234052ae8d2254187feffffffff6ad1ee040000000000000201ff53ff044a5262033601222e800000001618aa51e49a961f63fd111f64cd4a7e792c1d7168be7a07703de505ebed2cf70286ebbe755767adaa5835f4d78dec1ee30849d69eacfe80b7ee6b1585279536c301000c000f391400'
txid = 'ba5c88e07a4025a39ad3b85247cbd4f556a70d6312b18e04513c7cec9d45d6ac'
self._run_naive_tests_on_tx(raw_tx, txid)
    '''
    # end partial txns <---
class NetworkMock(object):
    """Trivial network stand-in: answers every request with a canned value."""

    def __init__(self, unspent):
        # Canned payload returned by synchronous_send(), whatever the request.
        self.unspent = unspent

    def synchronous_send(self, arg):
        """Ignore *arg* and return the preconfigured unspent data."""
        return self.unspent
| 93.804805
| 9,287
| 0.838173
| 1,210
| 31,237
| 21.382645
| 0.207438
| 0.027828
| 0.014455
| 0.010049
| 0.154369
| 0.134426
| 0.11769
| 0.114676
| 0.106482
| 0.096046
| 0
| 0.506758
| 0.116528
| 31,237
| 332
| 9,288
| 94.087349
| 0.430771
| 0.012325
| 0
| 0.478764
| 0
| 0
| 0.673393
| 0.637637
| 0
| 1
| 0
| 0.003012
| 0.227799
| 1
| 0.073359
| false
| 0
| 0.027027
| 0.003861
| 0.119691
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
32b5b6dcdaad495d0f7225bd6511c4bd7c2d4e35
| 147
|
py
|
Python
|
fpn/symbols/__init__.py
|
qilei123/fpn_crop
|
641d06486b1422225443a9ac3c4b60ae9fb91b10
|
[
"MIT"
] | 1
|
2019-12-17T09:20:29.000Z
|
2019-12-17T09:20:29.000Z
|
fpn/symbols/__init__.py
|
qilei123/fpn_crop
|
641d06486b1422225443a9ac3c4b60ae9fb91b10
|
[
"MIT"
] | null | null | null |
fpn/symbols/__init__.py
|
qilei123/fpn_crop
|
641d06486b1422225443a9ac3c4b60ae9fb91b10
|
[
"MIT"
] | null | null | null |
import resnet_v1_101_fpn_rcnn
import resnet_v1_101_fpn_dcn_rcnn
import resnet_v1_101_fpn_rcnn_l1_focal
import resnet_v1_101_fpn_rcnn_l1_focal_test
| 29.4
| 43
| 0.945578
| 30
| 147
| 3.9
| 0.333333
| 0.410256
| 0.478632
| 0.581197
| 0.940171
| 0.940171
| 0.529915
| 0.529915
| 0
| 0
| 0
| 0.129496
| 0.054422
| 147
| 4
| 44
| 36.75
| 0.71223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
3eed34084843d73e8b1a004a72b9dbffa766b150
| 18
|
py
|
Python
|
Chapter 02/ch2_1.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
Chapter 02/ch2_1.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
Chapter 02/ch2_1.py
|
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
|
f6a4194684515495d00aa38347a725dd08f39a0c
|
[
"MIT"
] | null | null | null |
# Demonstrate abs(): the magnitude of -24.75 is printed without its sign.
magnitude = abs(-24.75)
print(magnitude)
| 18
| 18
| 0.666667
| 4
| 18
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 0
| 18
| 1
| 18
| 18
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
41017983ad0239a6f751765a6f49dedb933706fa
| 6,426
|
py
|
Python
|
tests/test_installdeps_config.py
|
boborot/python-screwdrivercd
|
c4f1165741c8af7a47c126ee4abc5504c350a77f
|
[
"Apache-2.0"
] | 6
|
2019-12-31T21:49:07.000Z
|
2021-06-22T21:50:32.000Z
|
tests/test_installdeps_config.py
|
boborot/python-screwdrivercd
|
c4f1165741c8af7a47c126ee4abc5504c350a77f
|
[
"Apache-2.0"
] | 24
|
2019-11-19T00:35:14.000Z
|
2021-03-27T16:55:37.000Z
|
tests/test_installdeps_config.py
|
boborot/python-screwdrivercd
|
c4f1165741c8af7a47c126ee4abc5504c350a77f
|
[
"Apache-2.0"
] | 10
|
2019-12-09T19:14:54.000Z
|
2021-10-03T06:17:52.000Z
|
#!/usr/bin/env python
# Copyright 2019, Oath Inc.
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
import json
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
from screwdrivercd.installdeps.config import Configuration
from screwdrivercd.utility.contextmanagers import InTemporaryDirectory
# Sample pyproject.toml content used by the tests below; it exercises every
# [tool.sdv4_installdeps.*] tool table (apk, apt-get, yum, yinst, pip3).
# NOTE(review): the unbalanced quote in the 'yahoo_python36' entry appears
# intentional -- the test assertions later match that exact string verbatim.
TEST_CONFIG = '''[build-system]
requires = ["setuptools", "wheel"] # PEP 508 specifications.
[tool.sdv4_installdeps]
install = ['apk', 'apt-get', 'yinst', 'yum', 'pip3']
[tool.sdv4_installdeps.apk]
deps = [
'python3',
'mysql-client'
]
[tool.sdv4_installdeps.apt-get]
deps = [
'python3',
'mysql-client'
]
repos = {}
[tool.sdv4_installdeps.yum]
repos.verizon_python_rpms = "https://edge.artifactory.yahoo.com:4443/artifactory/python_rpms/python_rpms.repo"
deps = [
'yahoo_python36;distro_version<"7.5',
'yahoo_python37;distro_version>="7.5"',
'mysql;distro_version<"7"',
'mariadb;distro_version>="7"'
]
[tool.sdv4_installdeps.yinst]
deps = [
'python36',
'dist_utils'
]
deps_stable = []
deps_current = []
deps_test = []
deps_quarantine = []
[tool.sdv4_installdeps.pip3]
bin_dir = ''
deps = []
repos = {}
'''
class TestConfig(unittest.TestCase):
    """Tests for screwdrivercd.installdeps.config.Configuration.

    Each test runs inside a temporary directory, writes (or omits) a
    pyproject.toml there, and checks the resulting Configuration contents.
    """

    def setUp(self):
        super(TestConfig, self).setUp()

    def _assert_default_configuration(self, result):
        """Assert *result* carries only the built-in defaults.

        All per-tool dep lists are empty and the install order is the full
        default tool list.
        """
        self.assertListEqual(result.configuration['apk']['deps'], [])
        self.assertListEqual(result.configuration['apt-get']['deps'], [])
        self.assertListEqual(result.configuration['install'], ['apk', 'apt-get', 'yinst', 'yum', 'pip3'])
        self.assertListEqual(result.configuration['yinst']['deps'], [])
        self.assertListEqual(result.configuration['yum']['deps'], [])
        self.assertListEqual(result.configuration['pip3']['deps'], [])

    def _assert_test_config_deps(self, result):
        """Assert *result* matches the per-tool deps declared in TEST_CONFIG."""
        self.assertListEqual(result.configuration['apk']['deps'], ['python3', 'mysql-client'])
        self.assertListEqual(result.configuration['apt-get']['deps'], ['python3', 'mysql-client'])
        self.assertListEqual(result.configuration['yinst']['deps'], ['python36', 'dist_utils'])
        # The unbalanced quote in the first entry mirrors TEST_CONFIG verbatim.
        self.assertListEqual(result.configuration['yum']['deps'], ['yahoo_python36;distro_version<"7.5', 'yahoo_python37;distro_version>="7.5"', 'mysql;distro_version<"7"', 'mariadb;distro_version>="7"'])
        self.assertListEqual(result.configuration['pip3']['deps'], [])

    def test__configuration__defaults__no_config(self):
        """No pyproject.toml present at all -> built-in defaults."""
        with InTemporaryDirectory():
            self._assert_default_configuration(Configuration())

    def test__configuration__no_tool_configs(self):
        """pyproject.toml without any [tool.*] table -> built-in defaults."""
        with InTemporaryDirectory():
            with open('pyproject.toml', 'w') as file_handle:
                file_handle.write('[build-system]\nrequires = ["setuptools", "wheel"] # PEP 508 specifications.')
            self._assert_default_configuration(Configuration('pyproject.toml'))

    def test__configuration__invalid_filename(self):
        """A filename that does not exist -> built-in defaults (no error)."""
        with InTemporaryDirectory():
            with open('pyproject.toml', 'w') as file_handle:
                file_handle.write('[build-system]\nrequires = ["setuptools", "wheel"] # PEP 508 specifications.')
            self._assert_default_configuration(Configuration('pyprojectt.toml'))

    def test__configuration__no_sdv4_installdeps_configs(self):
        """pyproject.toml with only an unrelated [tool.foo] table -> defaults."""
        with InTemporaryDirectory():
            with open('pyproject.toml', 'w') as file_handle:
                file_handle.write('[build-system]\nrequires = ["setuptools", "wheel"] # PEP 508 specifications.\n[tool.foo]\ninstall = ["apk", "apt-get", "yinst", "yum", "pip3"]')
            self._assert_default_configuration(Configuration())

    def test__configuration__test__deps(self):
        """Full TEST_CONFIG -> deps parsed from each [tool.sdv4_installdeps.*]."""
        with InTemporaryDirectory():
            with open('pyproject.toml', 'w') as file_handle:
                file_handle.write(TEST_CONFIG)
            self._assert_test_config_deps(Configuration())

    def test__configuration__test__deps__scrwdrivercd_installdeps(self):
        """The 'screwdrivercd_installdeps' table name works the same as 'sdv4_installdeps'."""
        with InTemporaryDirectory():
            with open('pyproject.toml', 'w') as file_handle:
                file_handle.write(TEST_CONFIG.replace('sdv4_installdeps', 'screwdrivercd_installdeps'))
            self._assert_test_config_deps(Configuration())
# Allow running this test module directly (e.g. `python test_installdeps_config.py`).
if __name__ == '__main__':
    unittest.main()
| 49.430769
| 208
| 0.628073
| 626
| 6,426
| 6.289137
| 0.183706
| 0.19304
| 0.2159
| 0.328169
| 0.7714
| 0.749556
| 0.737109
| 0.729997
| 0.729997
| 0.729997
| 0
| 0.016699
| 0.207905
| 6,426
| 129
| 209
| 49.813953
| 0.756778
| 0.023343
| 0
| 0.554545
| 0
| 0.018182
| 0.357564
| 0.111111
| 0
| 0
| 0
| 0
| 0.309091
| 1
| 0.063636
| false
| 0
| 0.045455
| 0
| 0.118182
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4120aee39a7211c3bb0f2eb714f5b186a237930d
| 110
|
py
|
Python
|
xinetzone/docs/tutorial/cudnn.py
|
daobook/tvm
|
a0dca482824ba9e18ec914b962ce31fcec0696e2
|
[
"Apache-2.0"
] | null | null | null |
xinetzone/docs/tutorial/cudnn.py
|
daobook/tvm
|
a0dca482824ba9e18ec914b962ce31fcec0696e2
|
[
"Apache-2.0"
] | 1
|
2022-02-16T15:48:57.000Z
|
2022-02-16T15:48:57.000Z
|
xinetzone/docs/tutorial/cudnn.py
|
xinetzone/tvm
|
6576b422da06ebd10a64d182f7f12d91d1d77387
|
[
"Apache-2.0"
] | null | null | null |
import os

# Make the CUDA toolkit binaries (nvcc etc.) visible to subprocesses.
# NOTE(review): assumes a Linux layout with CUDA installed under
# /usr/local/cuda -- confirm for the target machine.
os.environ["PATH"] += ":/usr/local/cuda/bin"
# Prepend the CUDA runtime library dir instead of clobbering LD_LIBRARY_PATH,
# so library paths already configured by the environment are preserved.
_ld_path = os.environ.get("LD_LIBRARY_PATH")
os.environ["LD_LIBRARY_PATH"] = (
    "/usr/local/cuda/lib64" if not _ld_path else "/usr/local/cuda/lib64:" + _ld_path
)
| 27.5
| 54
| 0.7
| 18
| 110
| 4.166667
| 0.611111
| 0.24
| 0.32
| 0.426667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019417
| 0.063636
| 110
| 4
| 54
| 27.5
| 0.708738
| 0
| 0
| 0
| 0
| 0
| 0.540541
| 0.189189
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.